whatcanGOwrong
This commit is contained in:
@@ -0,0 +1,56 @@
|
||||
# Golang CircleCI 2.0 configuration file
|
||||
#
|
||||
# Check https://circleci.com/docs/2.0/language-go/ for more details
|
||||
version: 2.1
|
||||
|
||||
jobs:
|
||||
"golang-1_15": &template
|
||||
machine:
|
||||
# https://circleci.com/docs/2.0/configuration-reference/#available-machine-images
|
||||
image: ubuntu-2004:202010-01
|
||||
# docker_layer_caching: true
|
||||
|
||||
# https://circleci.com/docs/2.0/configuration-reference/#resource_class
|
||||
resource_class: medium
|
||||
|
||||
# Leave working directory unspecified and use defaults:
|
||||
# https://circleci.com/blog/go-v1.11-modules-and-circleci/
|
||||
# working_directory: /go/src/github.com/golang-migrate/migrate
|
||||
|
||||
environment:
|
||||
GO111MODULE: "on"
|
||||
GO_VERSION: "1.15.x"
|
||||
|
||||
steps:
|
||||
# - setup_remote_docker:
|
||||
# version: 19.03.13
|
||||
# docker_layer_caching: true
|
||||
- run: curl -sL -o ~/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
|
||||
- run: curl -sfL -o ~/bin/golangci-lint.sh https://install.goreleaser.com/github.com/golangci/golangci-lint.sh
|
||||
- run: chmod +x ~/bin/gimme ~/bin/golangci-lint.sh
|
||||
- run: eval "$(gimme $GO_VERSION)"
|
||||
- run: golangci-lint.sh -b ~/bin v1.37.0
|
||||
- checkout
|
||||
- restore_cache:
|
||||
keys:
|
||||
- go-mod-v1-{{ arch }}-{{ checksum "go.sum" }}
|
||||
- run: golangci-lint run
|
||||
- run: make test COVERAGE_DIR=/tmp/coverage
|
||||
- save_cache:
|
||||
key: go-mod-v1-{{ arch }}-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
- run: go get github.com/mattn/goveralls
|
||||
- run: goveralls -service=circle-ci -coverprofile /tmp/coverage/combined.txt
|
||||
|
||||
"golang-1_16":
|
||||
<<: *template
|
||||
environment:
|
||||
GO_VERSION: "1.16.x"
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
build:
|
||||
jobs:
|
||||
- "golang-1_15"
|
||||
- "golang-1_16"
|
||||
@@ -0,0 +1,12 @@
|
||||
# Project
|
||||
FAQ.md
|
||||
README.md
|
||||
LICENSE
|
||||
.gitignore
|
||||
.travis.yml
|
||||
CONTRIBUTING.md
|
||||
MIGRATIONS.md
|
||||
docker-deploy.sh
|
||||
|
||||
# Golang
|
||||
testing
|
||||
Vendored
+39
@@ -0,0 +1,39 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
|
||||
---
|
||||
|
||||
**Describe the Bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**Steps to Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. My migrations look like '...'
|
||||
2. I ran migrate with the following options '....'
|
||||
3. See error
|
||||
|
||||
**Expected Behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Migrate Version**
|
||||
e.g. v3.4.0
|
||||
Obtained by running: `migrate -version`
|
||||
|
||||
**Loaded Source Drivers**
|
||||
e.g. s3, github, go-bindata, gcs, file
|
||||
Obtained by running: `migrate -help`
|
||||
|
||||
**Loaded Database Drivers**
|
||||
e.g. spanner, stub, clickhouse, cockroachdb, crdb-postgres, postgres, postgresql, pgx, redshift, cassandra, cockroach, mysql
|
||||
Obtained by running: `migrate -help`
|
||||
|
||||
**Go Version**
|
||||
e.g. go version go1.11 linux/amd64
|
||||
Obtained by running: `go version`
|
||||
|
||||
**Stacktrace**
|
||||
Please provide if available
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
||||
Vendored
+17
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
+108
@@ -0,0 +1,108 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.22.x"
|
||||
- uses: actions/checkout@v4
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
go: ["1.21.x", "1.22.x"]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go }}
|
||||
|
||||
- name: Run test
|
||||
run: make test COVERAGE_DIR=/tmp/coverage
|
||||
|
||||
- name: Send goveralls coverage
|
||||
uses: shogo82148/actions-goveralls@v1
|
||||
with:
|
||||
path-to-profile: /tmp/coverage/combined.txt
|
||||
flag-name: Go-${{ matrix.go }}
|
||||
parallel: true
|
||||
|
||||
check-coverage:
|
||||
name: Check coverage
|
||||
needs: [test]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: shogo82148/actions-goveralls@v1
|
||||
with:
|
||||
parallel-finished: true
|
||||
|
||||
goreleaser:
|
||||
name: Release a new version
|
||||
needs: [lint, test]
|
||||
runs-on: ubuntu-latest
|
||||
environment: GoReleaser
|
||||
# This job only runs when
|
||||
# 1. When the previous `lint` and `test` jobs has completed successfully
|
||||
# 2. When the repository is not a fork, i.e. it will only run on the official golang-migrate/migrate
|
||||
# 3. When the workflow is triggered by a tag with `v` prefix
|
||||
if: ${{ success() && github.repository == 'golang-migrate/migrate' && startsWith(github.ref, 'refs/tags/v') }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: ruby/setup-ruby@v1
|
||||
with:
|
||||
ruby-version: 2.7
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.22.x"
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
username: golangmigrate
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- run: echo "SOURCE=$(make echo-source)" >> $GITHUB_ENV
|
||||
- run: echo "DATABASE=$(make echo-database)" >> $GITHUB_ENV
|
||||
|
||||
- uses: goreleaser/goreleaser-action@v5
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- run: gem install package_cloud
|
||||
- run: package_cloud push golang-migrate/migrate/ubuntu/bionic dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
- run: package_cloud push golang-migrate/migrate/ubuntu/focal dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
- run: package_cloud push golang-migrate/migrate/ubuntu/jammy dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
- run: package_cloud push golang-migrate/migrate/debian/buster dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
- run: package_cloud push golang-migrate/migrate/debian/bullseye dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
- run: package_cloud push golang-migrate/migrate/debian/bookworm dist/migrate.linux-amd64.deb
|
||||
env:
|
||||
PACKAGECLOUD_TOKEN: ${{ secrets.PACKAGECLOUD_TOKEN }}
|
||||
@@ -0,0 +1,10 @@
|
||||
.DS_Store
|
||||
cli/build
|
||||
cli/cli
|
||||
cli/migrate
|
||||
.coverage
|
||||
.godoc.pid
|
||||
vendor/
|
||||
.vscode/
|
||||
.idea
|
||||
dist/
|
||||
@@ -0,0 +1,26 @@
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
timeout: 5m
|
||||
linters:
|
||||
enable:
|
||||
#- golint
|
||||
- interfacer
|
||||
- unconvert
|
||||
#- dupl
|
||||
- goconst
|
||||
- gofmt
|
||||
- misspell
|
||||
- unparam
|
||||
- nakedret
|
||||
- prealloc
|
||||
#- gosec
|
||||
linters-settings:
|
||||
misspell:
|
||||
locale: US
|
||||
issues:
|
||||
max-same-issues: 0
|
||||
max-issues-per-linter: 0
|
||||
exclude-use-default: false
|
||||
exclude:
|
||||
# gosec: Duplicated errcheck checks
|
||||
- G104
|
||||
@@ -0,0 +1,102 @@
|
||||
project_name: migrate
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
- darwin
|
||||
goarch:
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
- 386
|
||||
goarm:
|
||||
- 7
|
||||
main: ./cmd/migrate
|
||||
ldflags:
|
||||
- '-w -s -X main.Version={{ .Version }} -extldflags "static"'
|
||||
flags:
|
||||
- "-tags={{ .Env.DATABASE }} {{ .Env.SOURCE }}"
|
||||
- "-trimpath"
|
||||
nfpms:
|
||||
- homepage: "https://github.com/golang-migrate/migrate"
|
||||
maintainer: "dhui@users.noreply.github.com"
|
||||
license: MIT
|
||||
description: "Database migrations"
|
||||
formats:
|
||||
- deb
|
||||
file_name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
dockers:
|
||||
- goos: linux
|
||||
goarch: amd64
|
||||
dockerfile: Dockerfile.github-actions
|
||||
use: buildx
|
||||
ids:
|
||||
- migrate
|
||||
image_templates:
|
||||
- 'migrate/migrate:{{ .Tag }}-amd64'
|
||||
build_flag_templates:
|
||||
- '--label=org.opencontainers.image.created={{ .Date }}'
|
||||
- '--label=org.opencontainers.image.title={{ .ProjectName }}'
|
||||
- '--label=org.opencontainers.image.revision={{ .FullCommit }}'
|
||||
- '--label=org.opencontainers.image.version={{ .Version }}'
|
||||
- "--label=org.opencontainers.image.source={{ .GitURL }}"
|
||||
- "--platform=linux/amd64"
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
dockerfile: Dockerfile.github-actions
|
||||
use: buildx
|
||||
ids:
|
||||
- migrate
|
||||
image_templates:
|
||||
- 'migrate/migrate:{{ .Tag }}-arm64'
|
||||
build_flag_templates:
|
||||
- '--label=org.opencontainers.image.created={{ .Date }}'
|
||||
- '--label=org.opencontainers.image.title={{ .ProjectName }}'
|
||||
- '--label=org.opencontainers.image.revision={{ .FullCommit }}'
|
||||
- '--label=org.opencontainers.image.version={{ .Version }}'
|
||||
- "--label=org.opencontainers.image.source={{ .GitURL }}"
|
||||
- "--platform=linux/arm64"
|
||||
|
||||
docker_manifests:
|
||||
- name_template: 'migrate/migrate:{{ .Tag }}'
|
||||
image_templates:
|
||||
- 'migrate/migrate:{{ .Tag }}-amd64'
|
||||
- 'migrate/migrate:{{ .Tag }}-arm64'
|
||||
- name_template: 'migrate/migrate:{{ .Major }}'
|
||||
image_templates:
|
||||
- 'migrate/migrate:{{ .Tag }}-amd64'
|
||||
- 'migrate/migrate:{{ .Tag }}-arm64'
|
||||
- name_template: 'migrate/migrate:latest'
|
||||
image_templates:
|
||||
- 'migrate/migrate:{{ .Tag }}-amd64'
|
||||
- 'migrate/migrate:{{ .Tag }}-arm64'
|
||||
archives:
|
||||
- name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
checksum:
|
||||
name_template: 'sha256sum.txt'
|
||||
release:
|
||||
draft: true
|
||||
prerelease: auto
|
||||
source:
|
||||
enabled: true
|
||||
format: zip
|
||||
changelog:
|
||||
skip: false
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- '^docs:'
|
||||
- '^test:'
|
||||
- Merge pull request
|
||||
- Merge branch
|
||||
- go mod tidy
|
||||
snapshot:
|
||||
name_template: "{{ .Tag }}-next"
|
||||
@@ -0,0 +1,138 @@
|
||||
language: go
|
||||
sudo: required
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
include:
|
||||
# Supported versions of Go: https://golang.org/dl/
|
||||
- go: "1.14.x"
|
||||
- go: "1.15.x"
|
||||
- go: master
|
||||
|
||||
go_import_path: github.com/golang-migrate/migrate
|
||||
|
||||
env:
|
||||
global:
|
||||
- GO111MODULE=on
|
||||
- MIGRATE_TEST_CONTAINER_BOOT_TIMEOUT=60
|
||||
- DOCKER_USERNAME=golangmigrate
|
||||
- secure: "oSOznzUrgr5h45qW4PONkREpisPAt40tnM+KFWtS/Ggu5UI2Ie0CmyYXWuBjbt7B97a4yN9Qzmn8FxJHJ7kk+ABOi3muhkxeIhr6esXbzHhX/Jhv0mj1xkzX7KoVN9oHBz3cOI/QeRyEAO68xjDHNE2kby4RTT9VBt6TQUakKVkqI5qkqLBTADepCjVC+9XhxVxUNyeWKU8ormaUfJBjoNVoDlwXekUPnJenfmfZqXxUInvBCfUyp7Pq+kurBORmg4yc6qOlRYuK67Xw+i5xpjbZouNlXPk0rq7pPy5zjhmZQ3kImoFPvNMeKViDcI6kSIJKtjdhms9/g/6MgXS9HlL5kFy8tYKbsyiHnHB1BsvaLAKXctbUZFDPstgMPADfnad2kZXPrNqIhfWKZrGRWidawCYJ1sKKwYxLMKrtA0umqgMoL90MmBOELhuGmvMV0cFJB+zo+K2YWjEiMGd8xRb5mC5aAy0ZcCehO46jGtpr217EJmMF8Ywr7cFqM2Shg5U2jev9qUpYiXwmPnJKDuoT2ZHuHmPgFIkYiWC5yeJnnmG5bed1sKBp93AFrJX+1Rx5oC4BpNegewmBZKpOSwls/D1uMAeQK3dPmQHLsT6o2VBLfeDGr+zY0R85ywwPZCv00vGol02zYoTqN7eFqr6Qhjr/qx5K1nnxJdFK3Ts="
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg
|
||||
|
||||
|
||||
before_install:
|
||||
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
# Install golangci-lint
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
|
||||
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
|
||||
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- golangci-lint run
|
||||
- make test COVERAGE_DIR=/tmp/coverage
|
||||
|
||||
after_success:
|
||||
- goveralls -service=travis-ci -coverprofile /tmp/coverage/combined.txt
|
||||
- make list-external-deps > dependency_tree.txt && cat dependency_tree.txt
|
||||
- make build-cli
|
||||
- gem install --no-document fpm
|
||||
- fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m dhui@users.noreply.github.com --url https://github.com/golang-migrate/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/local/bin/migrate
|
||||
|
||||
deploy:
|
||||
- provider: releases
|
||||
api_key:
|
||||
secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w=
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
file:
|
||||
- cli/build/migrate.linux-amd64.tar.gz
|
||||
- cli/build/migrate.linux-armv7.tar.gz
|
||||
- cli/build/migrate.linux-arm64.tar.gz
|
||||
- cli/build/migrate.darwin-amd64.tar.gz
|
||||
- cli/build/migrate.windows-amd64.exe.tar.gz
|
||||
- cli/build/migrate.windows-386.exe.tar.gz
|
||||
- cli/build/sha256sum.txt
|
||||
- dependency_tree.txt
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/xenial
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/bionic
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/focal
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/stretch
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/buster
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: script
|
||||
script: ./docker-deploy.sh
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.15.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
@@ -0,0 +1,24 @@
|
||||
# Development, Testing and Contributing
|
||||
|
||||
1. Make sure you have a running Docker daemon
|
||||
(Install for [MacOS](https://docs.docker.com/docker-for-mac/))
|
||||
1. Use a version of Go that supports [modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) (e.g. Go 1.11+)
|
||||
1. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/golang-migrate/migrate`
|
||||
* Ensure that [Go modules are enabled](https://golang.org/cmd/go/#hdr-Preliminary_module_support) (e.g. your repo path or the `GO111MODULE` environment variable are set correctly)
|
||||
1. Install [golangci-lint](https://github.com/golangci/golangci-lint#install)
|
||||
1. Run the linter: `golangci-lint run`
|
||||
1. Confirm tests are working: `make test-short`
|
||||
1. Write awesome code ...
|
||||
1. `make test` to run all tests against all database versions
|
||||
1. Push code and open Pull Request
|
||||
|
||||
Some more helpful commands:
|
||||
|
||||
* You can specify which database/ source tests to run:
|
||||
`make test-short SOURCE='file go_bindata' DATABASE='postgres cassandra'`
|
||||
* After `make test`, run `make html-coverage` which opens a shiny test coverage overview.
|
||||
* `make build-cli` builds the CLI in directory `cli/build/`.
|
||||
* `make list-external-deps` lists all external dependencies for each package
|
||||
* `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server.
|
||||
Repeatedly call `make docs` to refresh the server.
|
||||
* Set the `DOCKER_API_VERSION` environment variable to the latest supported version if you get errors regarding the docker client API version being too new.
|
||||
@@ -0,0 +1,26 @@
|
||||
FROM golang:1.22-alpine3.19 AS builder
|
||||
ARG VERSION
|
||||
|
||||
RUN apk add --no-cache git gcc musl-dev make
|
||||
|
||||
WORKDIR /go/src/github.com/golang-migrate/migrate
|
||||
|
||||
ENV GO111MODULE=on
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
|
||||
RUN go mod download
|
||||
|
||||
COPY . ./
|
||||
|
||||
RUN make build-docker
|
||||
|
||||
FROM alpine:3.19
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY --from=builder /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /usr/local/bin/migrate
|
||||
RUN ln -s /usr/local/bin/migrate /migrate
|
||||
|
||||
ENTRYPOINT ["migrate"]
|
||||
CMD ["--help"]
|
||||
@@ -0,0 +1,17 @@
|
||||
ARG DOCKER_IMAGE
|
||||
FROM $DOCKER_IMAGE
|
||||
|
||||
RUN apk add --no-cache git gcc musl-dev make
|
||||
|
||||
WORKDIR /go/src/github.com/golang-migrate/migrate
|
||||
|
||||
ENV GO111MODULE=on
|
||||
ENV COVERAGE_DIR=/tmp/coverage
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
|
||||
RUN go mod download
|
||||
|
||||
COPY . ./
|
||||
|
||||
CMD ["make", "test"]
|
||||
@@ -0,0 +1,11 @@
|
||||
FROM alpine:3.19
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY migrate /usr/local/bin/migrate
|
||||
|
||||
RUN ln -s /usr/local/bin/migrate /usr/bin/migrate
|
||||
RUN ln -s /usr/local/bin/migrate /migrate
|
||||
|
||||
ENTRYPOINT ["migrate"]
|
||||
CMD ["--help"]
|
||||
@@ -0,0 +1,79 @@
|
||||
# FAQ
|
||||
|
||||
#### How is the code base structured?
|
||||
```
|
||||
/ package migrate (the heart of everything)
|
||||
/cli the CLI wrapper
|
||||
/database database driver and sub directories have the actual driver implementations
|
||||
/source source driver and sub directories have the actual driver implementations
|
||||
```
|
||||
|
||||
#### Why is there no `source/driver.go:Last()`?
|
||||
It's not needed. And unless the source has a "native" way to read a directory in reversed order,
|
||||
it might be expensive to do a full directory scan in order to get the last element.
|
||||
|
||||
#### What is a NilMigration? NilVersion?
|
||||
NilMigration defines a migration without a body. NilVersion is defined as const -1.
|
||||
|
||||
#### What is the difference between uint(version) and int(targetVersion)?
|
||||
version refers to an existing migration version coming from a source and therefore can never be negative.
|
||||
targetVersion can either be a version OR represent a NilVersion, which equals -1.
|
||||
|
||||
#### What's the difference between Next/Previous and Up/Down?
|
||||
```
|
||||
1_first_migration.up.extension next -> 2_second_migration.up.extension ...
|
||||
1_first_migration.down.extension <- previous 2_second_migration.down.extension ...
|
||||
```
|
||||
|
||||
#### Why two separate files (up and down) for a migration?
|
||||
It makes all of our lives easier. No new markup/syntax to learn for users
|
||||
and existing database utility tools continue to work as expected.
|
||||
|
||||
#### How many migrations can migrate handle?
|
||||
Whatever the maximum positive signed integer value is for your platform.
|
||||
For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to
|
||||
the currently run and pre-fetched migrations in memory. Please note that some
|
||||
source drivers need to build a full "directory" tree first, which puts some
|
||||
heat on the memory consumption.
|
||||
|
||||
#### Are the table tests in migrate_test.go bloated?
|
||||
Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact
|
||||
the tests are very visual now and might help new users understand expected behaviors quickly.
|
||||
Migrate from version x to y and y is the last migration? Just check out the test for
|
||||
that particular case and know what's going on instantly.
|
||||
|
||||
#### What is Docker being used for?
|
||||
Only for testing. See [testing/docker.go](testing/docker.go)
|
||||
|
||||
#### Why not just use docker-compose?
|
||||
It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast
|
||||
and whenever we want, not just once at the beginning of all tests.
|
||||
|
||||
#### Can I maintain my driver in my own repository?
|
||||
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though.
|
||||
The driver's functionality is dictated by migrate's interfaces. That means there should really
|
||||
just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing,
|
||||
just implemented a bit differently, co-exist somewhere on GitHub. If users have to do research first to find the
|
||||
"best" available driver for a database in order to get started, we would have failed as an open source community.
|
||||
|
||||
#### Can I mix multiple sources during a batch of migrations?
|
||||
No.
|
||||
|
||||
#### What does "dirty" database mean?
|
||||
Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists,
|
||||
which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error
|
||||
and then "force" the expected version.
|
||||
|
||||
#### What happens if two programs try and update the database at the same time?
|
||||
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations against
|
||||
the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
|
||||
the `pg_advisory_lock` function.
|
||||
|
||||
#### Do I need to create a table for tracking migration version used?
|
||||
No, it is done automatically.
|
||||
|
||||
#### Can I use migrate with a non-Go project?
|
||||
Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
|
||||
|
||||
#### I have got an error `Dirty database version 1. Fix and force version`. What should I do?
|
||||
Keep calm and refer to [the getting started docs](GETTING_STARTED.md#forcing-your-database-version).
|
||||
@@ -0,0 +1,53 @@
|
||||
# Getting started
|
||||
Before you start, you should understand the concept of forward/up and reverse/down database migrations.
|
||||
|
||||
Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases).
|
||||
|
||||
## Create migrations
|
||||
Create some migrations using migrate CLI. Here is an example:
|
||||
```
|
||||
migrate create -ext sql -dir db/migrations -seq create_users_table
|
||||
```
|
||||
Once you create your files, you should fill them.
|
||||
|
||||
**IMPORTANT:** In a project developed by more than one person there is a chance of migrations inconsistency - e.g. two developers can create conflicting migrations, and the developer that created their migration later gets it merged to the repository first.
|
||||
Developers and Teams should keep an eye on such cases (especially during code review).
|
||||
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.
|
||||
|
||||
Consider making your migrations idempotent - we can run the same SQL code twice in a row with the same result. This makes our migrations more robust. On the other hand, it causes slightly less control over database schema - e.g. let's say you forgot to drop the table in down migration. You run down migration - the table is still there. When you run up migration again - `CREATE TABLE` would return an error, helping you find an issue in down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.
|
||||
|
||||
In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
|
||||
This way if one of the commands fails, our database will remain unchanged.
|
||||
|
||||
## Run migrations
|
||||
Run your migrations through the CLI or your app and check if they applied expected changes.
|
||||
Just to give you an idea:
|
||||
```
|
||||
migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up
|
||||
```
|
||||
|
||||
Just add the code to your app and you're ready to go!
|
||||
|
||||
Before committing your migrations you should run your migrations up, down, and then up again to see if migrations are working properly both ways.
|
||||
(e.g. if you created a table in a migration but reverse migration did not delete it, you will encounter an error when running the forward migration again)
|
||||
It's also worth checking your migrations in a separate, containerized environment. You can find some tools at the [end of this document](#further-reading).
|
||||
|
||||
**IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.
|
||||
|
||||
## Forcing your database version
|
||||
In case you run a migration that contained an error, migrate will not let you run other migrations on the same database. You will see an error like `Dirty database version 1. Fix and force version`, even when you fix the erred migration. This means your database was marked as 'dirty'.
|
||||
You need to investigate the migration error - was your migration applied partially, or was it not applied at all? Once you know, you should force your database to a version reflecting its real state. You can do so with the `force` command:
|
||||
```
|
||||
migrate -path PATH_TO_YOUR_MIGRATIONS -database YOUR_DATABASE_URL force VERSION
|
||||
```
|
||||
Once you force the version and your migration was fixed, your database is 'clean' again and you can proceed with your migrations.
|
||||
|
||||
For details and example of usage see [this comment](https://github.com/golang-migrate/migrate/issues/282#issuecomment-530743258).
|
||||
|
||||
## Further reading:
|
||||
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
|
||||
- [Best practices](MIGRATIONS.md)
|
||||
- [FAQ](FAQ.md)
|
||||
- Tools for testing your migrations in a container:
|
||||
- https://github.com/dhui/dktest
|
||||
- https://github.com/ory/dockertest
|
||||
@@ -0,0 +1,28 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Original Work
|
||||
Copyright (c) 2016 Matthias Kadenbach
|
||||
https://github.com/mattes/migrate
|
||||
|
||||
Modified Work
|
||||
Copyright (c) 2018 Dale Hui
|
||||
https://github.com/golang-migrate/migrate
|
||||
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@@ -0,0 +1,86 @@
|
||||
# Migrations
|
||||
|
||||
## Migration Filename Format
|
||||
|
||||
A single logical migration is represented as two separate migration files, one
|
||||
to migrate "up" to the specified version from the previous version, and a second
|
||||
to migrate back "down" to the previous version. These migrations can be provided
|
||||
by any one of the supported [migration sources](./README.md#migration-sources).
|
||||
|
||||
The ordering and direction of the migration files is determined by the filenames
|
||||
used for them. `migrate` expects the filenames of migrations to have the format:
|
||||
|
||||
{version}_{title}.up.{extension}
|
||||
{version}_{title}.down.{extension}
|
||||
|
||||
The `title` of each migration is unused, and is only for readability. Similarly,
|
||||
the `extension` of the migration files is not checked by the library, and should
|
||||
be an appropriate format for the database in use (`.sql` for SQL variants, for
|
||||
instance).
|
||||
|
||||
Versions of migrations may be represented as any 64 bit unsigned integer.
|
||||
All migrations are applied upward in order of increasing version number, and
|
||||
downward by decreasing version number.
|
||||
|
||||
Common versioning schemes include incrementing integers:
|
||||
|
||||
1_initialize_schema.down.sql
|
||||
1_initialize_schema.up.sql
|
||||
2_add_table.down.sql
|
||||
2_add_table.up.sql
|
||||
...
|
||||
|
||||
Or timestamps at an appropriate resolution:
|
||||
|
||||
1500360784_initialize_schema.down.sql
|
||||
1500360784_initialize_schema.up.sql
|
||||
1500445949_add_table.down.sql
|
||||
1500445949_add_table.up.sql
|
||||
...
|
||||
|
||||
But any scheme resulting in distinct, incrementing integers as versions is valid.
|
||||
|
||||
It is suggested that the version number of corresponding `up` and `down` migration
|
||||
files be equivalent for clarity, but they are allowed to differ so long as the
|
||||
relative ordering of the migrations is preserved.
|
||||
|
||||
The migration files are permitted to be "empty", in the event that a migration
|
||||
is a no-op or is irreversible. It is recommended to still include both migration
|
||||
files by making the whole migration file consist of a comment.
|
||||
If your database does not support comments, then deleting the migration file will also work.
|
||||
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
|
||||
will attempt to run an empty query. In this case, deleting the migration file will also work.
|
||||
For the rationale of this behavior, see:
|
||||
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)
|
||||
|
||||
## Migration Content Format
|
||||
|
||||
The format of the migration files themselves varies between database systems.
|
||||
Different databases have different semantics around schema changes and when and
|
||||
how they are allowed to occur
|
||||
(for instance, [if schema changes can occur within a transaction](https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis)).
|
||||
|
||||
As such, the `migrate` library has little to no checking around the format of
|
||||
migration sources. The migration files are generally processed directly by the
|
||||
drivers as raw operations.
|
||||
|
||||
## Reversibility of Migrations
|
||||
|
||||
Best practice for writing schema migration is that all migrations should be
|
||||
reversible. It should in theory be possible to run migrations down and back up
|
||||
through any and all versions with the state being fully cleaned and recreated
|
||||
by doing so.
|
||||
|
||||
By adhering to this recommended practice, development and deployment of new code
|
||||
is cleaner and easier (cleaning database state for a new feature should be as
|
||||
easy as migrating down to a prior version, and back up to the latest).
|
||||
|
||||
As opposed to some other migration libraries, `migrate` represents up and down
|
||||
migrations as separate files. This prevents any non-standard file syntax from
|
||||
being introduced which may result in unintended behavior or errors, depending
|
||||
on what database is processing the file.
|
||||
|
||||
While it is technically possible for an up or down migration to exist on its own
|
||||
without an equivalently versioned counterpart, it is strongly recommended to
|
||||
always include a down migration which cleans up the state of the corresponding
|
||||
up migration.
|
||||
@@ -0,0 +1,120 @@
|
||||
SOURCE ?= file go_bindata github github_ee bitbucket aws_s3 google_cloud_storage godoc_vfs gitlab
|
||||
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb yugabytedb clickhouse mongodb sqlserver firebird neo4j pgx pgx5 rqlite
|
||||
DATABASE_TEST ?= $(DATABASE) sqlite sqlite3 sqlcipher
|
||||
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
|
||||
TEST_FLAGS ?=
|
||||
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
|
||||
COVERAGE_DIR ?= .coverage
|
||||
|
||||
build:
|
||||
CGO_ENABLED=0 go build -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' ./cmd/migrate
|
||||
|
||||
build-docker:
|
||||
CGO_ENABLED=0 go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$(DATABASE) $(SOURCE)" ./cmd/migrate
|
||||
|
||||
build-cli: clean
|
||||
-mkdir ./cli/build
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -a -o ../../cli/build/migrate.linux-armv7 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -o ../../cli/build/migrate.linux-arm64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -a -o ../../cli/build/migrate.windows-386.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
|
||||
cd ./cli/build && shasum -a 256 * > sha256sum.txt
|
||||
cat ./cli/build/sha256sum.txt
|
||||
|
||||
|
||||
clean:
|
||||
-rm -r ./cli/build
|
||||
|
||||
|
||||
test-short:
|
||||
make test-with-flags --ignore-errors TEST_FLAGS='-short'
|
||||
|
||||
|
||||
test:
|
||||
@-rm -r $(COVERAGE_DIR)
|
||||
@mkdir $(COVERAGE_DIR)
|
||||
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
|
||||
|
||||
|
||||
test-with-flags:
|
||||
@echo SOURCE: $(SOURCE)
|
||||
@echo DATABASE_TEST: $(DATABASE_TEST)
|
||||
|
||||
@go test $(TEST_FLAGS) ./...
|
||||
|
||||
|
||||
kill-orphaned-docker-containers:
|
||||
docker rm -f $(shell docker ps -aq --filter label=migrate_test)
|
||||
|
||||
|
||||
html-coverage:
|
||||
go tool cover -html=$(COVERAGE_DIR)/combined.txt
|
||||
|
||||
|
||||
list-external-deps:
|
||||
$(call external_deps,'.')
|
||||
$(call external_deps,'./cli/...')
|
||||
$(call external_deps,'./testing/...')
|
||||
|
||||
$(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...'))
|
||||
$(call external_deps,'./source/testing/...')
|
||||
$(call external_deps,'./source/stub/...')
|
||||
|
||||
$(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...'))
|
||||
$(call external_deps,'./database/testing/...')
|
||||
$(call external_deps,'./database/stub/...')
|
||||
|
||||
|
||||
restore-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \;
|
||||
|
||||
|
||||
rewrite-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \;
|
||||
|
||||
|
||||
# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs
|
||||
docs:
|
||||
-make kill-docs
|
||||
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
|
||||
cat .godoc.pid
|
||||
|
||||
|
||||
kill-docs:
|
||||
@cat .godoc.pid
|
||||
kill -9 $$(cat .godoc.pid)
|
||||
rm .godoc.pid
|
||||
|
||||
|
||||
open-docs:
|
||||
open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate
|
||||
|
||||
|
||||
# example: make release V=0.0.0
|
||||
release:
|
||||
git tag v$(V)
|
||||
@read -p "Press enter to confirm and push to origin ..." && git push origin v$(V)
|
||||
|
||||
echo-source:
|
||||
@echo "$(SOURCE)"
|
||||
|
||||
echo-database:
|
||||
@echo "$(DATABASE)"
|
||||
|
||||
|
||||
define external_deps
|
||||
@echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
|
||||
|
||||
endef
|
||||
|
||||
|
||||
.PHONY: build build-docker build-cli clean test-short test test-with-flags html-coverage \
|
||||
restore-import-paths rewrite-import-paths list-external-deps release \
|
||||
docs kill-docs open-docs kill-orphaned-docker-containers echo-source echo-database
|
||||
|
||||
SHELL = /bin/sh
|
||||
RAND = $(shell echo $$RANDOM)
|
||||
|
||||
@@ -0,0 +1,196 @@
|
||||
[](https://github.com/golang-migrate/migrate/actions/workflows/ci.yaml?query=branch%3Amaster)
|
||||
[](https://pkg.go.dev/github.com/golang-migrate/migrate/v4)
|
||||
[](https://coveralls.io/github/golang-migrate/migrate?branch=master)
|
||||
[](https://packagecloud.io/golang-migrate/migrate?filter=debs)
|
||||
[](https://hub.docker.com/r/migrate/migrate/)
|
||||

|
||||
[](https://github.com/golang-migrate/migrate/releases)
|
||||
[](https://goreportcard.com/report/github.com/golang-migrate/migrate/v4)
|
||||
|
||||
# migrate
|
||||
|
||||
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
|
||||
|
||||
* Migrate reads migrations from [sources](#migration-sources)
|
||||
and applies them in correct order to a [database](#databases).
|
||||
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
|
||||
(Keeps the drivers lightweight, too.)
|
||||
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
|
||||
|
||||
Forked from [mattes/migrate](https://github.com/mattes/migrate)
|
||||
|
||||
## Databases
|
||||
|
||||
Database drivers run migrations. [Add a new database?](database/driver.go)
|
||||
|
||||
* [PostgreSQL](database/postgres)
|
||||
* [PGX v4](database/pgx)
|
||||
* [PGX v5](database/pgx/v5)
|
||||
* [Redshift](database/redshift)
|
||||
* [Ql](database/ql)
|
||||
* [Cassandra / ScyllaDB](database/cassandra)
|
||||
* [SQLite](database/sqlite)
|
||||
* [SQLite3](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
|
||||
* [SQLCipher](database/sqlcipher)
|
||||
* [MySQL / MariaDB](database/mysql)
|
||||
* [Neo4j](database/neo4j)
|
||||
* [MongoDB](database/mongodb)
|
||||
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
|
||||
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
|
||||
* [Google Cloud Spanner](database/spanner)
|
||||
* [CockroachDB](database/cockroachdb)
|
||||
* [YugabyteDB](database/yugabytedb)
|
||||
* [ClickHouse](database/clickhouse)
|
||||
* [Firebird](database/firebird)
|
||||
* [MS SQL Server](database/sqlserver)
|
||||
* [rqlite](database/rqlite)
|
||||
|
||||
### Database URLs
|
||||
|
||||
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?param1=true¶m2=false`
|
||||
|
||||
Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note, the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character)
|
||||
|
||||
Explicitly, the following characters need to be escaped:
|
||||
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
|
||||
|
||||
It's easiest to always run the URL parts of your DB connection URL (e.g. username, password, etc) through a URL encoder. See the example Python snippets below:
|
||||
|
||||
```bash
|
||||
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$ python2 -c 'import urllib; print urllib.quote(raw_input("String to encode: "), "")'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$
|
||||
```
|
||||
|
||||
## Migration Sources
|
||||
|
||||
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
|
||||
|
||||
* [Filesystem](source/file) - read from filesystem
|
||||
* [io/fs](source/iofs) - read from a Go [io/fs](https://pkg.go.dev/io/fs#FS)
|
||||
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
|
||||
* [pkger](source/pkger) - read from embedded binary data ([markbates/pkger](https://github.com/markbates/pkger))
|
||||
* [GitHub](source/github) - read from remote GitHub repositories
|
||||
* [GitHub Enterprise](source/github_ee) - read from remote GitHub Enterprise repositories
|
||||
* [Bitbucket](source/bitbucket) - read from remote Bitbucket repositories
|
||||
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
|
||||
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
|
||||
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
|
||||
|
||||
## CLI usage
|
||||
|
||||
* Simple wrapper around this library.
|
||||
* Handles ctrl+c (SIGINT) gracefully.
|
||||
* No config search paths, no config files, no magic ENV var injections.
|
||||
|
||||
__[CLI Documentation](cmd/migrate)__
|
||||
|
||||
### Basic usage
|
||||
|
||||
```bash
|
||||
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
### Docker usage
|
||||
|
||||
```bash
|
||||
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate
|
||||
-path=/migrations/ -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
## Use in your Go project
|
||||
|
||||
* API is stable and frozen for this release (v3 & v4).
|
||||
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
|
||||
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
|
||||
* Bring your own logger.
|
||||
* Uses `io.Reader` streams internally for low memory overhead.
|
||||
* Thread-safe and no goroutine leaks.
|
||||
|
||||
__[Go Documentation](https://pkg.go.dev/github.com/golang-migrate/migrate/v4)__
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/github"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m, err := migrate.New(
|
||||
"github://mattes:personal-access-token@mattes/migrate_test",
|
||||
"postgres://localhost:5432/database?sslmode=enable")
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
Want to use an existing database client?
|
||||
|
||||
```go
|
||||
import (
|
||||
"database/sql"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable")
|
||||
driver, err := postgres.WithInstance(db, &postgres.Config{})
|
||||
m, err := migrate.NewWithDatabaseInstance(
|
||||
"file:///migrations",
|
||||
"postgres", driver)
|
||||
m.Up() // or m.Step(2) if you want to explicitly set the number of migrations to run
|
||||
}
|
||||
```
|
||||
|
||||
## Getting started
|
||||
|
||||
Go to [getting started](GETTING_STARTED.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
* [CockroachDB](database/cockroachdb/TUTORIAL.md)
|
||||
* [PostgreSQL](database/postgres/TUTORIAL.md)
|
||||
|
||||
(more tutorials to come)
|
||||
|
||||
## Migration files
|
||||
|
||||
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
|
||||
|
||||
```bash
|
||||
1481574547_create_users_table.up.sql
|
||||
1481574547_create_users_table.down.sql
|
||||
```
|
||||
|
||||
[Best practices: How to write migrations.](MIGRATIONS.md)
|
||||
|
||||
## Coming from another db migration tool?
|
||||
|
||||
Check out [migradaptor](https://github.com/musinit/migradaptor/).
|
||||
*Note: migradaptor is not affiliated or supported by this project*
|
||||
|
||||
## Versions
|
||||
|
||||
Version | Supported? | Import | Notes
|
||||
--------|------------|--------|------
|
||||
**master** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | New features and bug fixes arrive here first |
|
||||
**v4** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | Used for stable releases |
|
||||
**v3** | :x: | `import "github.com/golang-migrate/migrate"` (with package manager) or `import "gopkg.in/golang-migrate/migrate.v3"` (not recommended) | **DO NOT USE** - No longer supported |
|
||||
|
||||
## Development and Contributing
|
||||
|
||||
Yes, please! [`Makefile`](Makefile) is your friend,
|
||||
read the [development guide](CONTRIBUTING.md).
|
||||
|
||||
Also have a look at the [FAQ](FAQ.md).
|
||||
|
||||
---
|
||||
|
||||
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).
|
||||
@@ -0,0 +1,16 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| master | :white_check_mark: |
|
||||
| 4.x | :white_check_mark: |
|
||||
| 3.x | :x: |
|
||||
| < 3.0 | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
We prefer [coordinated disclosures](https://en.wikipedia.org/wiki/Coordinated_vulnerability_disclosure). To start one, create a GitHub security advisory following [these instructions](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability)
|
||||
|
||||
Please suggest potential impact and urgency in your reports.
|
||||
@@ -0,0 +1,3 @@
|
||||
# Deprecated
|
||||
|
||||
Use [cmd/migrate](../cmd/migrate) instead
|
||||
@@ -0,0 +1,8 @@
|
||||
package main
|
||||
|
||||
import "github.com/golang-migrate/migrate/v4/internal/cli"
|
||||
|
||||
// main is the entry point of the deprecated legacy CLI binary. It simply
// delegates to the shared CLI implementation with this binary's version.
//
// Deprecated: use cmd/migrate instead.
func main() {
	cli.Main(Version)
}
|
||||
@@ -0,0 +1,4 @@
|
||||
package main
|
||||
|
||||
// Version is the version string reported by the CLI. It defaults to "dev"
// and is overridden at build time by the Makefile via
// -ldflags "-X main.Version=...".
var Version = "dev"
|
||||
@@ -0,0 +1,138 @@
|
||||
# migrate CLI
|
||||
|
||||
## Installation
|
||||
|
||||
### Download pre-built binary (Windows, MacOS, or Linux)
|
||||
|
||||
[Release Downloads](https://github.com/golang-migrate/migrate/releases)
|
||||
|
||||
```bash
|
||||
$ curl -L https://github.com/golang-migrate/migrate/releases/download/$version/migrate.$os-$arch.tar.gz | tar xvz
|
||||
```
|
||||
|
||||
### MacOS
|
||||
|
||||
```bash
|
||||
$ brew install golang-migrate
|
||||
```
|
||||
|
||||
### Windows
|
||||
|
||||
Using [scoop](https://scoop.sh/)
|
||||
|
||||
```bash
|
||||
$ scoop install migrate
|
||||
```
|
||||
|
||||
### Linux (*.deb package)
|
||||
|
||||
```bash
|
||||
$ curl -L https://packagecloud.io/golang-migrate/migrate/gpgkey | apt-key add -
|
||||
$ echo "deb https://packagecloud.io/golang-migrate/migrate/ubuntu/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/migrate.list
|
||||
$ apt-get update
|
||||
$ apt-get install -y migrate
|
||||
```
|
||||
|
||||
### With Go toolchain
|
||||
|
||||
#### Versioned
|
||||
|
||||
```bash
|
||||
$ go get -u -d github.com/golang-migrate/migrate/cmd/migrate
|
||||
$ cd $GOPATH/src/github.com/golang-migrate/migrate/cmd/migrate
|
||||
$ git checkout $TAG # e.g. v4.1.0
|
||||
$ # Go 1.15 and below
|
||||
$ go build -tags 'postgres' -ldflags="-X main.Version=$(git describe --tags)" -o $GOPATH/bin/migrate $GOPATH/src/github.com/golang-migrate/migrate/cmd/migrate
|
||||
$ # Go 1.16+
|
||||
$ go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@$TAG
|
||||
```
|
||||
|
||||
#### Unversioned
|
||||
|
||||
```bash
|
||||
$ # Go 1.15 and below
|
||||
$ go get -tags 'postgres' -u github.com/golang-migrate/migrate/cmd/migrate
|
||||
$ # Go 1.16+
|
||||
$ go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@latest
|
||||
```
|
||||
|
||||
#### Notes
|
||||
|
||||
1. Requires a version of Go that [supports modules](https://golang.org/cmd/go/#hdr-Preliminary_module_support). e.g. Go 1.11+
|
||||
1. These examples build the cli which will only work with postgres. In order
|
||||
to build the cli for use with other databases, replace the `postgres` build tag
|
||||
with the appropriate database tag(s) for the databases desired. The tags
|
||||
correspond to the names of the sub-packages underneath the
|
||||
[`database`](../../database) package.
|
||||
1. Similarly to the database build tags, if you need to support other sources, use the appropriate build tag(s).
|
||||
1. Support for build constraints will be removed in the future: https://github.com/golang-migrate/migrate/issues/60
|
||||
1. For versions of Go 1.15 and lower, [make sure](https://github.com/golang-migrate/migrate/pull/257#issuecomment-705249902) you're not installing the `migrate` CLI from a module. e.g. there should not be any `go.mod` files in your current directory or any directory from your current directory to the root
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
$ migrate -help
|
||||
Usage: migrate OPTIONS COMMAND [arg...]
|
||||
migrate [ -version | -help ]
|
||||
|
||||
Options:
|
||||
-source Location of the migrations (driver://url)
|
||||
-path Shorthand for -source=file://path
|
||||
-database Run migrations against this database (driver://url)
|
||||
-prefetch N Number of migrations to load in advance before executing (default 10)
|
||||
-lock-timeout N Allow N seconds to acquire database lock (default 15)
|
||||
-verbose Print verbose logging
|
||||
-version Print version
|
||||
-help Print usage
|
||||
|
||||
Commands:
|
||||
create [-ext E] [-dir D] [-seq] [-digits N] [-format] NAME
|
||||
Create a set of timestamped up/down migrations titled NAME, in directory D with extension E.
|
||||
Use -seq option to generate sequential up/down migrations with N digits.
|
||||
Use -format option to specify a Go time format string.
|
||||
goto V Migrate to version V
|
||||
up [N] Apply all or N up migrations
|
||||
down [N] Apply all or N down migrations
|
||||
drop Drop everything inside database
|
||||
force V Set version V but don't run migration (ignores dirty state)
|
||||
version Print current migration version
|
||||
```
|
||||
|
||||
So let's say you want to run the first two migrations
|
||||
|
||||
```bash
|
||||
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
If your migrations are hosted on github
|
||||
|
||||
```bash
|
||||
$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \
|
||||
-database postgres://localhost:5432/database down 2
|
||||
```
|
||||
|
||||
The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received.
|
||||
Send SIGKILL for immediate halt.
|
||||
|
||||
## Reading CLI arguments from somewhere else
|
||||
|
||||
### ENV variables
|
||||
|
||||
```bash
|
||||
$ migrate -database "$MY_MIGRATE_DATABASE"
|
||||
```
|
||||
|
||||
### JSON files
|
||||
|
||||
Check out https://stedolan.github.io/jq/
|
||||
|
||||
```bash
|
||||
$ migrate -database "$(cat config.json | jq -r '.database')"
|
||||
```
|
||||
|
||||
### YAML files
|
||||
|
||||
```bash
|
||||
$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")"
|
||||
$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')"
|
||||
```
|
||||
+15
@@ -0,0 +1,15 @@
|
||||
FROM ubuntu:bionic
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg-agent
|
||||
|
||||
RUN curl -sSL https://packagecloud.io/golang-migrate/migrate/gpgkey | apt-key add -
|
||||
RUN echo "deb https://packagecloud.io/golang-migrate/migrate/ubuntu/ bionic main" > /etc/apt/sources.list.d/migrate.list
|
||||
RUN apt-get update && \
|
||||
apt-get install -y migrate
|
||||
|
||||
RUN migrate -version
|
||||
@@ -0,0 +1,7 @@
|
||||
package main
|
||||
|
||||
import "github.com/golang-migrate/migrate/v4/internal/cli"
|
||||
|
||||
// main is the entry point of the migrate CLI. All flag parsing and command
// handling is implemented in the shared internal/cli package; only the
// build-time version string is passed in from here.
func main() {
	cli.Main(Version)
}
|
||||
@@ -0,0 +1,4 @@
|
||||
package main
|
||||
|
||||
// Version is the version string reported by the CLI. It defaults to "dev"
// and is overridden at build time by the Makefile via
// -ldflags "-X main.Version=...".
var Version = "dev"
|
||||
@@ -0,0 +1,45 @@
|
||||
# Cassandra / ScyllaDB
|
||||
|
||||
* `Drop()` method will not work on Cassandra 2.X because it relies on
|
||||
system_schema table which comes with 3.X
|
||||
* Other methods should work properly but are **not tested**
|
||||
* The Cassandra driver (gocql) does not natively support executing multiple statements in a single query. To allow for multiple statements in a single migration, you can use the `x-multi-statement` param. There are two important caveats:
|
||||
* This mode splits the migration text into separately-executed statements by a semi-colon `;`. Thus `x-multi-statement` cannot be used when a statement in the migration contains a string with a semi-colon.
|
||||
* The queries are not executed in any sort of transaction/batch, meaning you are responsible for fixing partial migrations.
|
||||
|
||||
**ScyllaDB**
|
||||
|
||||
* No additional configuration is required since it is a drop-in replacement for Cassandra.
|
||||
* The `Drop()` method works for ScyllaDB 5.1
|
||||
|
||||
|
||||
## Usage
|
||||
`cassandra://host:port/keyspace?param1=value¶m2=value2`
|
||||
|
||||
|
||||
| URL Query | Default value | Description |
|
||||
|------------|-------------|-----------|
|
||||
| `x-migrations-table` | schema_migrations | Name of the migrations table |
|
||||
| `x-multi-statement` | false | Enable multiple statements to be ran in a single migration (See note above) |
|
||||
| `port` | 9042 | The port to bind to |
|
||||
| `consistency` | ALL | Migration consistency
|
||||
| `protocol` | | Cassandra protocol version (3 or 4)
|
||||
| `timeout` | 1 minute | Migration timeout
|
||||
| `connect-timeout` | 600ms | Initial connection timeout to the cluster |
|
||||
| `username` | nil | Username to use when authenticating. |
|
||||
| `password` | nil | Password to use when authenticating. |
|
||||
| `sslcert` | | Cert file location. The file must contain PEM encoded data. |
|
||||
| `sslkey` | | Key file location. The file must contain PEM encoded data. |
|
||||
| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. |
|
||||
| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) |
|
||||
| `disable-host-lookup`| false | Disable initial host lookup. |
|
||||
|
||||
`timeout` is parsed using [time.ParseDuration(s string)](https://golang.org/pkg/time/#ParseDuration)
|
||||
|
||||
|
||||
## Upgrading from v1
|
||||
|
||||
1. Write down the current migration version from schema_migrations
|
||||
2. `DROP TABLE schema_migrations`
|
||||
3. Download and install the latest migrate version.
|
||||
4. Force the current migration version with `migrate force <current_version>`.
|
||||
+352
@@ -0,0 +1,352 @@
|
||||
package cassandra
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
nurl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/gocql/gocql"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/golang-migrate/migrate/v4/database/multistmt"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
// init registers this driver with the migrate database registry so it can
// be selected via the "cassandra" URL scheme.
func init() {
	db := new(Cassandra)
	database.Register("cassandra", db)
}
|
||||
|
||||
var (
	// multiStmtDelimiter is the separator used to split a migration into
	// individual statements when multi-statement mode is enabled. Note:
	// a semicolon inside a string literal will also trigger a split —
	// this is a documented caveat of x-multi-statement.
	multiStmtDelimiter = []byte(";")

	// DefaultMultiStatementMaxSize is the default upper bound, in bytes,
	// on a single statement when splitting multi-statement migrations.
	DefaultMultiStatementMaxSize = 10 * 1 << 20 // 10 MB
)
|
||||
|
||||
// DefaultMigrationsTable is the table used to track applied migration
// versions when Config.MigrationsTable is left empty.
var DefaultMigrationsTable = "schema_migrations"
|
||||
|
||||
var (
	// ErrNilConfig is returned by WithInstance when config is nil.
	ErrNilConfig = errors.New("no config")
	// ErrNoKeyspace is returned when no keyspace is provided via the
	// config or the connection URL path.
	ErrNoKeyspace = errors.New("no keyspace provided")
	// ErrDatabaseDirty indicates a previously failed migration left the
	// database marked dirty.
	ErrDatabaseDirty = errors.New("database is dirty")
	// ErrClosedSession is returned when the supplied gocql session has
	// already been closed.
	ErrClosedSession = errors.New("session is closed")
)
|
||||
|
||||
// Config holds the settings for the Cassandra migration driver.
type Config struct {
	// MigrationsTable is the table used to record migration state.
	// Defaults to DefaultMigrationsTable when empty.
	MigrationsTable string
	// KeyspaceName is the keyspace migrations run against (required).
	KeyspaceName string
	// MultiStatementEnabled splits each migration on ";" into separately
	// executed statements.
	MultiStatementEnabled bool
	// MultiStatementMaxSize caps the size of a single split statement in
	// bytes. Defaults to DefaultMultiStatementMaxSize when <= 0.
	MultiStatementMaxSize int
}
|
||||
|
||||
// Cassandra implements database.Driver for Cassandra / ScyllaDB.
type Cassandra struct {
	session *gocql.Session
	// isLocked guards against concurrent migrations through this instance.
	isLocked atomic.Bool

	// Open and WithInstance need to guarantee that config is never nil
	config *Config
}
|
||||
|
||||
func WithInstance(session *gocql.Session, config *Config) (database.Driver, error) {
|
||||
if config == nil {
|
||||
return nil, ErrNilConfig
|
||||
} else if len(config.KeyspaceName) == 0 {
|
||||
return nil, ErrNoKeyspace
|
||||
}
|
||||
|
||||
if session.Closed() {
|
||||
return nil, ErrClosedSession
|
||||
}
|
||||
|
||||
if len(config.MigrationsTable) == 0 {
|
||||
config.MigrationsTable = DefaultMigrationsTable
|
||||
}
|
||||
|
||||
if config.MultiStatementMaxSize <= 0 {
|
||||
config.MultiStatementMaxSize = DefaultMultiStatementMaxSize
|
||||
}
|
||||
|
||||
c := &Cassandra{
|
||||
session: session,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if err := c.ensureVersionTable(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Cassandra) Open(url string) (database.Driver, error) {
|
||||
u, err := nurl.Parse(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check for missing mandatory attributes
|
||||
if len(u.Path) == 0 {
|
||||
return nil, ErrNoKeyspace
|
||||
}
|
||||
|
||||
cluster := gocql.NewCluster(u.Host)
|
||||
cluster.Keyspace = strings.TrimPrefix(u.Path, "/")
|
||||
cluster.Consistency = gocql.All
|
||||
cluster.Timeout = 1 * time.Minute
|
||||
|
||||
if len(u.Query().Get("username")) > 0 && len(u.Query().Get("password")) > 0 {
|
||||
authenticator := gocql.PasswordAuthenticator{
|
||||
Username: u.Query().Get("username"),
|
||||
Password: u.Query().Get("password"),
|
||||
}
|
||||
cluster.Authenticator = authenticator
|
||||
}
|
||||
|
||||
// Retrieve query string configuration
|
||||
if len(u.Query().Get("consistency")) > 0 {
|
||||
var consistency gocql.Consistency
|
||||
consistency, err = parseConsistency(u.Query().Get("consistency"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster.Consistency = consistency
|
||||
}
|
||||
if len(u.Query().Get("protocol")) > 0 {
|
||||
var protoversion int
|
||||
protoversion, err = strconv.Atoi(u.Query().Get("protocol"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cluster.ProtoVersion = protoversion
|
||||
}
|
||||
if len(u.Query().Get("timeout")) > 0 {
|
||||
var timeout time.Duration
|
||||
timeout, err = time.ParseDuration(u.Query().Get("timeout"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cluster.Timeout = timeout
|
||||
}
|
||||
if len(u.Query().Get("connect-timeout")) > 0 {
|
||||
var connectTimeout time.Duration
|
||||
connectTimeout, err = time.ParseDuration(u.Query().Get("connect-timeout"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cluster.ConnectTimeout = connectTimeout
|
||||
}
|
||||
|
||||
if len(u.Query().Get("sslmode")) > 0 {
|
||||
if u.Query().Get("sslmode") != "disable" {
|
||||
sslOpts := &gocql.SslOptions{}
|
||||
|
||||
if len(u.Query().Get("sslrootcert")) > 0 {
|
||||
sslOpts.CaPath = u.Query().Get("sslrootcert")
|
||||
}
|
||||
if len(u.Query().Get("sslcert")) > 0 {
|
||||
sslOpts.CertPath = u.Query().Get("sslcert")
|
||||
}
|
||||
if len(u.Query().Get("sslkey")) > 0 {
|
||||
sslOpts.KeyPath = u.Query().Get("sslkey")
|
||||
}
|
||||
|
||||
if u.Query().Get("sslmode") == "verify-full" {
|
||||
sslOpts.EnableHostVerification = true
|
||||
}
|
||||
|
||||
cluster.SslOpts = sslOpts
|
||||
}
|
||||
}
|
||||
|
||||
if len(u.Query().Get("disable-host-lookup")) > 0 {
|
||||
if flag, err := strconv.ParseBool(u.Query().Get("disable-host-lookup")); err != nil && flag {
|
||||
cluster.DisableInitialHostLookup = true
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
session, err := cluster.CreateSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
multiStatementMaxSize := DefaultMultiStatementMaxSize
|
||||
if s := u.Query().Get("x-multi-statement-max-size"); len(s) > 0 {
|
||||
multiStatementMaxSize, err = strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return WithInstance(session, &Config{
|
||||
KeyspaceName: strings.TrimPrefix(u.Path, "/"),
|
||||
MigrationsTable: u.Query().Get("x-migrations-table"),
|
||||
MultiStatementEnabled: u.Query().Get("x-multi-statement") == "true",
|
||||
MultiStatementMaxSize: multiStatementMaxSize,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Cassandra) Close() error {
|
||||
c.session.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cassandra) Lock() error {
|
||||
if !c.isLocked.CAS(false, true) {
|
||||
return database.ErrLocked
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cassandra) Unlock() error {
|
||||
if !c.isLocked.CAS(true, false) {
|
||||
return database.ErrNotLocked
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run applies a single migration read from the given reader.
//
// When MultiStatementEnabled is set, the input is split on ';' (each
// statement bounded by MultiStatementMaxSize) and every non-empty
// statement is executed individually; execution stops at the first
// failure. Note the naive delimiter split means ';' inside string
// literals is not supported. Otherwise the whole input is executed as
// one statement.
func (c *Cassandra) Run(migration io.Reader) error {
	if c.config.MultiStatementEnabled {
		var err error
		// The callback returns false to stop parsing early; a statement
		// error is carried out through the captured err variable, while
		// e reports parse/IO errors from multistmt.Parse itself.
		if e := multistmt.Parse(migration, multiStmtDelimiter, c.config.MultiStatementMaxSize, func(m []byte) bool {
			tq := strings.TrimSpace(string(m))
			if tq == "" {
				// Skip empty fragments (e.g. a trailing delimiter).
				return true
			}
			if e := c.session.Query(tq).Exec(); e != nil {
				err = database.Error{OrigErr: e, Err: "migration failed", Query: m}
				return false
			}
			return true
		}); e != nil {
			return e
		}
		return err
	}

	migr, err := io.ReadAll(migration)
	if err != nil {
		return err
	}
	// run migration
	if err := c.session.Query(string(migr)).Exec(); err != nil {
		// TODO: cast to Cassandra error and get line number
		return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
	}
	return nil
}
|
||||
|
||||
// SetVersion replaces the stored migration state with (version, dirty):
// all existing rows are removed, then the new version is inserted.
// Not atomic — a crash between delete and insert can leave the table
// empty.
func (c *Cassandra) SetVersion(version int, dirty bool) error {
	// DELETE instead of TRUNCATE because AWS Keyspaces does not support it
	// see: https://docs.aws.amazon.com/keyspaces/latest/devguide/cassandra-apis.html
	squery := `SELECT version FROM "` + c.config.MigrationsTable + `"`
	dquery := `DELETE FROM "` + c.config.MigrationsTable + `" WHERE version = ?`
	iter := c.session.Query(squery).Iter()
	var previous int
	for iter.Scan(&previous) {
		// Delete each previously stored version row individually.
		if err := c.session.Query(dquery, previous).Exec(); err != nil {
			return &database.Error{OrigErr: err, Query: []byte(dquery)}
		}
	}
	// Close surfaces any error encountered while iterating the SELECT.
	if err := iter.Close(); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(squery)}
	}

	// Also re-write the schema version for nil dirty versions to prevent
	// empty schema version for failed down migration on the first migration
	// See: https://github.com/golang-migrate/migrate/issues/330
	if version >= 0 || (version == database.NilVersion && dirty) {
		query := `INSERT INTO "` + c.config.MigrationsTable + `" (version, dirty) VALUES (?, ?)`
		if err := c.session.Query(query, version, dirty).Exec(); err != nil {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}
	}

	return nil
}
|
||||
|
||||
// Return current keyspace version
|
||||
func (c *Cassandra) Version() (version int, dirty bool, err error) {
|
||||
query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1`
|
||||
err = c.session.Query(query).Scan(&version, &dirty)
|
||||
switch {
|
||||
case err == gocql.ErrNotFound:
|
||||
return database.NilVersion, false, nil
|
||||
|
||||
case err != nil:
|
||||
if _, ok := err.(*gocql.Error); ok {
|
||||
return database.NilVersion, false, nil
|
||||
}
|
||||
return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
|
||||
default:
|
||||
return version, dirty, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cassandra) Drop() error {
|
||||
// select all tables in current schema
|
||||
query := fmt.Sprintf(`SELECT table_name from system_schema.tables WHERE keyspace_name='%s'`, c.config.KeyspaceName)
|
||||
iter := c.session.Query(query).Iter()
|
||||
var tableName string
|
||||
for iter.Scan(&tableName) {
|
||||
err := c.session.Query(fmt.Sprintf(`DROP TABLE %s`, tableName)).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
// Note that this function locks the database, which deviates from the usual
// convention of "caller locks" in the Cassandra type.
func (c *Cassandra) ensureVersionTable() (err error) {
	if err = c.Lock(); err != nil {
		return err
	}

	// Release the lock on every exit path; an unlock failure is merged
	// into the primary error instead of masking it.
	defer func() {
		if e := c.Unlock(); e != nil {
			if err == nil {
				err = e
			} else {
				err = multierror.Append(err, e)
			}
		}
	}()

	err = c.session.Query(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (version bigint, dirty boolean, PRIMARY KEY(version))", c.config.MigrationsTable)).Exec()
	if err != nil {
		return err
	}
	// Probe the table with a read so schema/permission problems surface
	// here rather than during the first migration.
	if _, _, err = c.Version(); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// ParseConsistency wraps gocql.ParseConsistency
|
||||
// to return an error instead of a panicking.
|
||||
func parseConsistency(consistencyStr string) (consistency gocql.Consistency, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
err, ok = r.(error)
|
||||
if !ok {
|
||||
err = fmt.Errorf("Failed to parse consistency \"%s\": %v", consistencyStr, r)
|
||||
}
|
||||
}
|
||||
}()
|
||||
consistency = gocql.ParseConsistency(consistencyStr)
|
||||
|
||||
return consistency, nil
|
||||
}
|
||||
+122
@@ -0,0 +1,122 @@
|
||||
package cassandra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/dhui/dktest"
|
||||
"github.com/gocql/gocql"
|
||||
)
|
||||
|
||||
import (
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
var (
	// opts makes dktest wait for the CQL port and poll isReady before
	// handing the container to the tests.
	opts = dktest.Options{PortRequired: true, ReadyFunc: isReady}
	// Supported versions: http://cassandra.apache.org/download/
	// Although Cassandra 2.x is supported by the Apache Foundation,
	// the migrate db driver only supports Cassandra 3.x since it uses
	// the system_schema keyspace.
	// last ScyllaDB version tested is 5.1.11
	specs = []dktesting.ContainerSpec{
		{ImageName: "cassandra:3.0", Options: opts},
		{ImageName: "cassandra:3.11", Options: opts},
		{ImageName: "scylladb/scylla:5.1.11", Options: opts},
	}
)
|
||||
|
||||
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
|
||||
// Cassandra exposes 5 ports (7000, 7001, 7199, 9042 & 9160)
|
||||
// We only need the port bound to 9042
|
||||
ip, portStr, err := c.Port(9042)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
cluster := gocql.NewCluster(ip)
|
||||
cluster.Port = port
|
||||
cluster.Consistency = gocql.All
|
||||
p, err := cluster.CreateSession()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer p.Close()
|
||||
// Create keyspace for tests
|
||||
if err = p.Query("CREATE KEYSPACE testks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1}").Exec(); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Test is the suite entry point: it runs the conformance and migration
// subtests, then removes the pulled container images.
func Test(t *testing.T) {
	t.Run("test", test)
	t.Run("testMigrate", testMigrate)

	// Remove the docker images pulled for the container specs once all
	// subtests have finished.
	t.Cleanup(func() {
		for _, spec := range specs {
			t.Log("Cleaning up ", spec.ImageName)
			if err := spec.Cleanup(); err != nil {
				t.Error("Error removing ", spec.ImageName, "error:", err)
			}
		}
	})
}
|
||||
|
||||
// test runs the generic driver conformance suite against each
// container spec in parallel.
func test(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		// 9042 is the CQL native protocol port.
		ip, port, err := c.Port(9042)
		if err != nil {
			t.Fatal("Unable to get mapped port:", err)
		}
		addr := fmt.Sprintf("cassandra://%v:%v/testks", ip, port)
		p := &Cassandra{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
		dt.Test(t, d, []byte("SELECT table_name from system_schema.tables"))
	})
}
|
||||
|
||||
// testMigrate runs the full up/down migration suite using the example
// migration files against each container spec in parallel.
func testMigrate(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		// 9042 is the CQL native protocol port.
		ip, port, err := c.Port(9042)
		if err != nil {
			t.Fatal("Unable to get mapped port:", err)
		}
		addr := fmt.Sprintf("cassandra://%v:%v/testks", ip, port)
		p := &Cassandra{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "testks", d)
		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)
	})
}
|
||||
+1
@@ -0,0 +1 @@
|
||||
SELECT table_name from system_schema.tables
|
||||
+1
@@ -0,0 +1 @@
|
||||
SELECT table_name from system_schema.tables
|
||||
@@ -0,0 +1,26 @@
|
||||
# ClickHouse
|
||||
|
||||
`clickhouse://host:port?username=user&password=password&database=clicks&x-multi-statement=true`
|
||||
|
||||
| URL Query | Description |
|
||||
|------------|-------------|
|
||||
| `x-migrations-table`| Name of the migrations table |
|
||||
| `x-migrations-table-engine`| Engine to use for the migrations table, defaults to TinyLog |
|
||||
| `x-cluster-name` | Name of cluster for creating `schema_migrations` table cluster wide |
|
||||
| `database` | The name of the database to connect to |
|
||||
| `username` | The user to sign in as |
|
||||
| `password` | The user's password |
|
||||
| `host` | The host to connect to. |
|
||||
| `port` | The port to bind to. |
|
||||
| `x-multi-statement` | Enable multiple statements to be run in a single migration; defaults to false (See note below) |
|
||||
|
||||
## Notes
|
||||
|
||||
* The Clickhouse driver does not natively support executing multiple statements in a single query. To allow for multiple statements in a single migration, you can use the `x-multi-statement` param. There are two important caveats:
|
||||
* This mode splits the migration text into separately-executed statements by a semi-colon `;`. Thus `x-multi-statement` cannot be used when a statement in the migration contains a string with a semi-colon.
|
||||
* The queries are not executed in any sort of transaction/batch, meaning you are responsible for fixing partial migrations.
|
||||
* Using the default TinyLog table engine for the schema_versions table prevents backing up the table if using the [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup) tool. If you are backing up the database, make sure the migrations are run with `x-migrations-table-engine=MergeTree`.
|
||||
* Clickhouse cluster mode is not officially supported, since it's not tested right now, but you can try enabling `schema_migrations` table replication by specifying a `x-cluster-name`:
|
||||
* When `x-cluster-name` is specified, `x-migrations-table-engine` also should be specified. See the docs regarding [replicated table engines](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replication/#table_engines-replication).
|
||||
* When `x-cluster-name` is specified, only the `schema_migrations` table is replicated across the cluster. You still need to write your migrations so that the application tables are replicated within the cluster.
|
||||
* If you want to create a database inside a migration, be aware that the table managing migrations (the `schema_migrations` table) is created in the `default` database, so you can't use `USE <database_name>` inside a migration. In this case you may omit the database from the connection string (an example can be found [here](examples/migrations/003_create_database.up.sql))
|
||||
+316
@@ -0,0 +1,316 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/golang-migrate/migrate/v4/database/multistmt"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
var (
	// multiStmtDelimiter separates statements in multi-statement mode.
	multiStmtDelimiter = []byte(";")

	// Defaults applied by init() when the corresponding Config field is unset.
	DefaultMigrationsTable       = "schema_migrations"
	DefaultMigrationsTableEngine = "TinyLog"
	DefaultMultiStatementMaxSize = 10 * 1 << 20 // 10 MB

	ErrNilConfig = fmt.Errorf("no config")
)

// Config holds the driver settings derived from the DSN (Open) or
// supplied directly (WithInstance).
type Config struct {
	DatabaseName          string // target database; defaults to the connection's current database
	ClusterName           string // when set, the migrations table is created ON CLUSTER
	MigrationsTable       string // table tracking applied versions
	MigrationsTableEngine string // engine for the migrations table, e.g. TinyLog or MergeTree
	MultiStatementEnabled bool   // split migrations on ';' when true
	MultiStatementMaxSize int    // max bytes per statement when splitting
}

// init registers the driver under the "clickhouse" scheme so that
// migrate.New can construct it from a clickhouse:// DSN.
func init() {
	database.Register("clickhouse", &ClickHouse{})
}
|
||||
|
||||
func WithInstance(conn *sql.DB, config *Config) (database.Driver, error) {
|
||||
if config == nil {
|
||||
return nil, ErrNilConfig
|
||||
}
|
||||
|
||||
if err := conn.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch := &ClickHouse{
|
||||
conn: conn,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if err := ch.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// ClickHouse implements database.Driver for ClickHouse via database/sql.
type ClickHouse struct {
	conn     *sql.DB
	config   *Config     // never nil after Open/WithInstance
	isLocked atomic.Bool // in-process advisory lock flag used by Lock/Unlock
}
|
||||
|
||||
func (ch *ClickHouse) Open(dsn string) (database.Driver, error) {
|
||||
purl, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
q := migrate.FilterCustomQuery(purl)
|
||||
q.Scheme = "tcp"
|
||||
conn, err := sql.Open("clickhouse", q.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
multiStatementMaxSize := DefaultMultiStatementMaxSize
|
||||
if s := purl.Query().Get("x-multi-statement-max-size"); len(s) > 0 {
|
||||
multiStatementMaxSize, err = strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
migrationsTableEngine := DefaultMigrationsTableEngine
|
||||
if s := purl.Query().Get("x-migrations-table-engine"); len(s) > 0 {
|
||||
migrationsTableEngine = s
|
||||
}
|
||||
|
||||
ch = &ClickHouse{
|
||||
conn: conn,
|
||||
config: &Config{
|
||||
MigrationsTable: purl.Query().Get("x-migrations-table"),
|
||||
MigrationsTableEngine: migrationsTableEngine,
|
||||
DatabaseName: purl.Query().Get("database"),
|
||||
ClusterName: purl.Query().Get("x-cluster-name"),
|
||||
MultiStatementEnabled: purl.Query().Get("x-multi-statement") == "true",
|
||||
MultiStatementMaxSize: multiStatementMaxSize,
|
||||
},
|
||||
}
|
||||
|
||||
if err := ch.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (ch *ClickHouse) init() error {
|
||||
if len(ch.config.DatabaseName) == 0 {
|
||||
if err := ch.conn.QueryRow("SELECT currentDatabase()").Scan(&ch.config.DatabaseName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(ch.config.MigrationsTable) == 0 {
|
||||
ch.config.MigrationsTable = DefaultMigrationsTable
|
||||
}
|
||||
|
||||
if ch.config.MultiStatementMaxSize <= 0 {
|
||||
ch.config.MultiStatementMaxSize = DefaultMultiStatementMaxSize
|
||||
}
|
||||
|
||||
if len(ch.config.MigrationsTableEngine) == 0 {
|
||||
ch.config.MigrationsTableEngine = DefaultMigrationsTableEngine
|
||||
}
|
||||
|
||||
return ch.ensureVersionTable()
|
||||
}
|
||||
|
||||
// Run applies a single migration read from r.
//
// In multi-statement mode the input is split on ';' and each non-empty
// statement is executed separately; there is no transaction, so a
// failure leaves earlier statements applied. Otherwise the whole input
// is sent as one statement.
func (ch *ClickHouse) Run(r io.Reader) error {
	if ch.config.MultiStatementEnabled {
		var err error
		// The callback's return value controls whether parsing
		// continues; statement errors are carried out through the
		// captured err variable, while e reports parse/IO errors from
		// multistmt.Parse itself.
		if e := multistmt.Parse(r, multiStmtDelimiter, ch.config.MultiStatementMaxSize, func(m []byte) bool {
			tq := strings.TrimSpace(string(m))
			if tq == "" {
				// Ignore empty fragments such as a trailing ';'.
				return true
			}
			if _, e := ch.conn.Exec(string(m)); e != nil {
				err = database.Error{OrigErr: e, Err: "migration failed", Query: m}
				return false
			}
			return true
		}); e != nil {
			return e
		}
		return err
	}

	migration, err := io.ReadAll(r)
	if err != nil {
		return err
	}

	if _, err := ch.conn.Exec(string(migration)); err != nil {
		return database.Error{OrigErr: err, Err: "migration failed", Query: migration}
	}

	return nil
}
|
||||
func (ch *ClickHouse) Version() (int, bool, error) {
|
||||
var (
|
||||
version int
|
||||
dirty uint8
|
||||
query = "SELECT version, dirty FROM `" + ch.config.MigrationsTable + "` ORDER BY sequence DESC LIMIT 1"
|
||||
)
|
||||
if err := ch.conn.QueryRow(query).Scan(&version, &dirty); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return database.NilVersion, false, nil
|
||||
}
|
||||
return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
return version, dirty == 1, nil
|
||||
}
|
||||
|
||||
func (ch *ClickHouse) SetVersion(version int, dirty bool) error {
|
||||
var (
|
||||
bool = func(v bool) uint8 {
|
||||
if v {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
tx, err = ch.conn.Begin()
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
query := "INSERT INTO " + ch.config.MigrationsTable + " (version, dirty, sequence) VALUES (?, ?, ?)"
|
||||
if _, err := tx.Exec(query, version, bool(dirty), time.Now().UnixNano()); err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
// Note that this function locks the database, which deviates from the usual
// convention of "caller locks" in the ClickHouse type.
func (ch *ClickHouse) ensureVersionTable() (err error) {
	if err = ch.Lock(); err != nil {
		return err
	}

	// Release the lock on every exit path; an unlock failure is merged
	// into the primary error instead of masking it.
	defer func() {
		if e := ch.Unlock(); e != nil {
			if err == nil {
				err = e
			} else {
				err = multierror.Append(err, e)
			}
		}
	}()

	var (
		table string
		query = "SHOW TABLES FROM " + quoteIdentifier(ch.config.DatabaseName) + " LIKE '" + ch.config.MigrationsTable + "'"
	)
	// check if migration table exists
	if err := ch.conn.QueryRow(query).Scan(&table); err != nil {
		if err != sql.ErrNoRows {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}
	} else {
		// Table already present — nothing to do.
		return nil
	}

	// if not, create the empty migration table
	if len(ch.config.ClusterName) > 0 {
		// Create the table cluster-wide; the replication-capable
		// engine must be supplied by the caller (x-migrations-table-engine).
		query = fmt.Sprintf(`
			CREATE TABLE %s ON CLUSTER %s (
				version Int64,
				dirty UInt8,
				sequence UInt64
			) Engine=%s`, ch.config.MigrationsTable, ch.config.ClusterName, ch.config.MigrationsTableEngine)
	} else {
		query = fmt.Sprintf(`
			CREATE TABLE %s (
				version Int64,
				dirty UInt8,
				sequence UInt64
			) Engine=%s`, ch.config.MigrationsTable, ch.config.MigrationsTableEngine)
	}

	// MergeTree-family engines (names ending in "Tree") require an
	// ORDER BY clause.
	if strings.HasSuffix(ch.config.MigrationsTableEngine, "Tree") {
		query = fmt.Sprintf(`%s ORDER BY sequence`, query)
	}

	if _, err := ch.conn.Exec(query); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	return nil
}
|
||||
|
||||
// Drop removes every table in the configured database, including the
// migrations table: it lists the tables, then drops each one.
func (ch *ClickHouse) Drop() (err error) {
	query := "SHOW TABLES FROM " + quoteIdentifier(ch.config.DatabaseName)
	tables, err := ch.conn.Query(query)

	if err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	// Always release the result set; a Close failure is merged into the
	// named return value rather than overwriting it.
	defer func() {
		if errClose := tables.Close(); errClose != nil {
			err = multierror.Append(err, errClose)
		}
	}()

	for tables.Next() {
		var table string
		if err := tables.Scan(&table); err != nil {
			return err
		}

		query = "DROP TABLE IF EXISTS " + quoteIdentifier(ch.config.DatabaseName) + "." + quoteIdentifier(table)

		if _, err := ch.conn.Exec(query); err != nil {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}
	}
	// Surface iteration errors (e.g. the connection dropping mid-scan).
	if err := tables.Err(); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	return nil
}
|
||||
|
||||
func (ch *ClickHouse) Lock() error {
|
||||
if !ch.isLocked.CAS(false, true) {
|
||||
return database.ErrLocked
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (ch *ClickHouse) Unlock() error {
|
||||
if !ch.isLocked.CAS(true, false) {
|
||||
return database.ErrNotLocked
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (ch *ClickHouse) Close() error { return ch.conn.Close() }
|
||||
|
||||
// quoteIdentifier returns name as a double-quoted SQL identifier,
// doubling any embedded double quotes and truncating at the first NUL
// byte (a NUL cannot appear in a valid identifier).
// Copied from lib/pq implementation: https://github.com/lib/pq/blob/v1.9.0/conn.go#L1611
func quoteIdentifier(name string) string {
	if i := strings.IndexRune(name, 0); i >= 0 {
		name = name[:i]
	}
	var b strings.Builder
	b.WriteByte('"')
	b.WriteString(strings.ReplaceAll(name, `"`, `""`))
	b.WriteByte('"')
	return b.String()
}
|
||||
+224
@@ -0,0 +1,224 @@
|
||||
package clickhouse_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
sqldriver "database/sql/driver"
|
||||
"fmt"
|
||||
"log"
|
||||
"testing"
|
||||
|
||||
_ "github.com/ClickHouse/clickhouse-go"
|
||||
"github.com/dhui/dktest"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database/clickhouse"
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
// defaultPort is the ClickHouse native-protocol port inside the container.
const defaultPort = 9000

var (
	// tableEngines are the migrations-table engines the suite exercises.
	tableEngines = []string{"TinyLog", "MergeTree"}
	// opts configures credentials/database via env vars and makes
	// dktest wait for the port and poll isReady.
	opts = dktest.Options{
		Env:          map[string]string{"CLICKHOUSE_USER": "user", "CLICKHOUSE_PASSWORD": "password", "CLICKHOUSE_DB": "db"},
		PortRequired: true, ReadyFunc: isReady,
	}
	// specs lists the server images the tests run against.
	specs = []dktesting.ContainerSpec{
		{ImageName: "yandex/clickhouse-server:21.3", Options: opts},
	}
)
|
||||
|
||||
// clickhouseConnectionString builds the DSN the tests use for the given
// host and port; when engine is non-empty it also pins the engine used
// for the migrations table.
func clickhouseConnectionString(host, port, engine string) string {
	dsn := fmt.Sprintf(
		"clickhouse://%v:%v?username=user&password=password&database=db&x-multi-statement=true",
		host, port)
	if engine != "" {
		dsn += "&x-migrations-table-engine=" + engine
	}
	return dsn + "&debug=false"
}
|
||||
|
||||
// isReady reports whether the ClickHouse container is accepting
// connections on its native port. dktest polls this until it returns
// true.
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
	ip, port, err := c.Port(defaultPort)
	if err != nil {
		return false
	}

	db, err := sql.Open("clickhouse", clickhouseConnectionString(ip, port, ""))

	if err != nil {
		log.Println("open error", err)
		return false
	}
	defer func() {
		if err := db.Close(); err != nil {
			log.Println("close error:", err)
		}
	}()

	if err = db.PingContext(ctx); err != nil {
		switch err {
		case sqldriver.ErrBadConn:
			// Server not up yet; the poller will try again.
			return false
		default:
			// Log unexpected errors for diagnosis, then retry anyway.
			fmt.Println(err)
		}
		return false
	}

	return true
}
|
||||
|
||||
// TestCases runs the full driver suite once per supported
// migrations-table engine, plus a WithInstance variant that relies on
// default config values.
func TestCases(t *testing.T) {
	for _, engine := range tableEngines {
		t.Run("Test_"+engine, func(t *testing.T) { testSimple(t, engine) })
		t.Run("Migrate_"+engine, func(t *testing.T) { testMigrate(t, engine) })
		t.Run("Version_"+engine, func(t *testing.T) { testVersion(t, engine) })
		t.Run("Drop_"+engine, func(t *testing.T) { testDrop(t, engine) })
	}
	t.Run("WithInstanceDefaultConfigValues", func(t *testing.T) { testSimpleWithInstanceDefaultConfigValues(t) })
}
|
||||
|
||||
// testSimple runs the generic driver conformance suite with the given
// migrations-table engine.
func testSimple(t *testing.T, engine string) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := clickhouseConnectionString(ip, port, engine)
		p := &clickhouse.ClickHouse{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		dt.Test(t, d, []byte("SELECT 1"))
	})
}
|
||||
|
||||
// testSimpleWithInstanceDefaultConfigValues exercises WithInstance with
// an empty Config, verifying that every field falls back to a sensible
// default during init.
func testSimpleWithInstanceDefaultConfigValues(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := clickhouseConnectionString(ip, port, "")
		conn, err := sql.Open("clickhouse", addr)
		if err != nil {
			t.Fatal(err)
		}
		d, err := clickhouse.WithInstance(conn, &clickhouse.Config{})
		if err != nil {
			// WithInstance failed, so the driver won't close the
			// connection for us — do it here before bailing out.
			_ = conn.Close()
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		dt.Test(t, d, []byte("SELECT 1"))
	})
}
|
||||
|
||||
// testMigrate runs the full up/down migration suite using the example
// migration files with the given migrations-table engine.
func testMigrate(t *testing.T, engine string) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := clickhouseConnectionString(ip, port, engine)
		p := &clickhouse.ClickHouse{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "db", d)

		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)
	})
}
|
||||
|
||||
// testVersion verifies that SetVersion followed by Version round-trips
// the stored migration version with the given migrations-table engine.
func testVersion(t *testing.T, engine string) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		expectedVersion := 1

		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := clickhouseConnectionString(ip, port, engine)
		p := &clickhouse.ClickHouse{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		err = d.SetVersion(expectedVersion, false)
		if err != nil {
			t.Fatal(err)
		}

		version, _, err := d.Version()
		if err != nil {
			t.Fatal(err)
		}

		if version != expectedVersion {
			t.Fatal("Version mismatch")
		}
	})
}
|
||||
|
||||
// testDrop verifies that Drop succeeds against a freshly initialized
// database with the given migrations-table engine.
func testDrop(t *testing.T, engine string) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := clickhouseConnectionString(ip, port, engine)
		p := &clickhouse.ClickHouse{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		err = d.Drop()
		if err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS test_1;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
CREATE TABLE test_1 (
|
||||
Date Date
|
||||
) Engine=Memory;
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS test_2;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
CREATE TABLE test_2 (
|
||||
Date Date
|
||||
) Engine=Memory;
|
||||
+10
@@ -0,0 +1,10 @@
|
||||
-- Down migration for the Kafka -> ClickHouse analytics example.
-- The matching up migration creates every object inside the `analytics`
-- database, so the drops here must be fully qualified; the original
-- unqualified names resolved against the current (default) database and
-- were effectively no-ops. Materialized views are dropped first so nothing
-- still reads from the Kafka queue tables when those are removed.
DROP VIEW IF EXISTS analytics.user_ratings_queue_mv;
DROP VIEW IF EXISTS analytics.driver_ratings_queue_mv;
DROP VIEW IF EXISTS analytics.orders_queue_mv;
DROP TABLE IF EXISTS analytics.driver_ratings_queue;
DROP TABLE IF EXISTS analytics.user_ratings_queue;
DROP TABLE IF EXISTS analytics.orders_queue;
DROP TABLE IF EXISTS analytics.driver_ratings;
DROP TABLE IF EXISTS analytics.user_ratings;
DROP TABLE IF EXISTS analytics.orders;
DROP DATABASE IF EXISTS analytics;
|
||||
+81
@@ -0,0 +1,81 @@
|
||||
-- Up migration for the Kafka -> ClickHouse analytics example.
-- For each fact (driver ratings, user ratings, orders) it creates:
--   * a MergeTree storage table,
--   * a Kafka-engine "queue" table consuming a topic,
--   * a materialized view piping rows from the queue into storage.
CREATE DATABASE IF NOT EXISTS analytics;

CREATE TABLE IF NOT EXISTS analytics.driver_ratings(
    rate UInt8,
    userID Int64,
    driverID String,
    orderID String,
    inserted_time DateTime DEFAULT now()
) ENGINE = MergeTree
PARTITION BY driverID
ORDER BY (inserted_time);

CREATE TABLE analytics.driver_ratings_queue(
    rate UInt8,
    userID Int64,
    driverID String,
    orderID String
) ENGINE = Kafka
SETTINGS kafka_broker_list = 'broker:9092',
       kafka_topic_list = 'driver-ratings',
       kafka_group_name = 'rating_readers',
       kafka_format = 'Avro',
       kafka_max_block_size = 1048576;

CREATE MATERIALIZED VIEW analytics.driver_ratings_queue_mv TO analytics.driver_ratings AS
SELECT rate, userID, driverID, orderID
FROM analytics.driver_ratings_queue;

CREATE TABLE IF NOT EXISTS analytics.user_ratings(
    rate UInt8,
    userID Int64,
    driverID String,
    orderID String,
    inserted_time DateTime DEFAULT now()
) ENGINE = MergeTree
PARTITION BY userID
ORDER BY (inserted_time);

CREATE TABLE analytics.user_ratings_queue(
    rate UInt8,
    userID Int64,
    driverID String,
    orderID String
) ENGINE = Kafka
SETTINGS kafka_broker_list = 'broker:9092',
       kafka_topic_list = 'user-ratings',
       kafka_group_name = 'rating_readers',
       kafka_format = 'JSON',
       kafka_max_block_size = 1048576;

CREATE MATERIALIZED VIEW analytics.user_ratings_queue_mv TO analytics.user_ratings AS
SELECT rate, userID, driverID, orderID
FROM analytics.user_ratings_queue;

CREATE TABLE IF NOT EXISTS analytics.orders(
    from_place String,
    to_place String,
    userID Int64,
    driverID String,
    orderID String,
    inserted_time DateTime DEFAULT now()
) ENGINE = MergeTree
PARTITION BY driverID
ORDER BY (inserted_time);

CREATE TABLE analytics.orders_queue(
    from_place String,
    to_place String,
    userID Int64,
    driverID String,
    orderID String
) ENGINE = Kafka
SETTINGS kafka_broker_list = 'broker:9092',
       kafka_topic_list = 'orders',
       kafka_group_name = 'order_readers',
       kafka_format = 'Avro',
       kafka_max_block_size = 1048576;

-- Fixed: the view must target the fully-qualified analytics.orders table;
-- the original bare "orders" resolved against the current (default)
-- database, unlike the two sibling views above.
CREATE MATERIALIZED VIEW analytics.orders_queue_mv TO analytics.orders AS
SELECT from_place, to_place, userID, driverID, orderID
FROM analytics.orders_queue;
|
||||
+19
@@ -0,0 +1,19 @@
|
||||
# cockroachdb
|
||||
|
||||
`cockroachdb://user:password@host:port/dbname?query` (`cockroach://`, and `crdb-postgres://` work, too)
|
||||
|
||||
| URL Query | WithInstance Config | Description |
|
||||
|------------|---------------------|-------------|
|
||||
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
|
||||
| `x-lock-table` | `LockTable` | Name of the table which maintains the migration lock |
|
||||
| `x-force-lock` | `ForceLock` | Force lock acquisition to fix faulty migrations which may not have released the schema lock (Boolean, default is `false`) |
|
||||
| `dbname` | `DatabaseName` | The name of the database to connect to |
|
||||
| `user` | | The user to sign in as |
|
||||
| `password` | | The user's password |
|
||||
| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) |
|
||||
| `port` | | The port to bind to. (default is 26257, CockroachDB's default SQL port) |
|
||||
| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. |
|
||||
| `sslcert` | | Cert file location. The file must contain PEM encoded data. |
|
||||
| `sslkey` | | Key file location. The file must contain PEM encoded data. |
|
||||
| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. |
|
||||
| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) |
|
||||
+142
@@ -0,0 +1,142 @@
|
||||
# CockroachDB tutorial for beginners (insecure cluster)
|
||||
|
||||
## Create/configure database
|
||||
|
||||
First, let's start a local cluster - follow step 1. and 2. from [the docs](https://www.cockroachlabs.com/docs/stable/start-a-local-cluster.html#step-1-start-the-first-node).
|
||||
|
||||
Once you have it, create a database. Here I am going to create a database called `example`.
|
||||
Our user here is `cockroach`. We are not going to use a password, since it's not supported for insecure cluster.
|
||||
```
|
||||
cockroach sql --insecure --host=localhost:26257
|
||||
```
|
||||
```
|
||||
CREATE DATABASE example;
|
||||
CREATE USER IF NOT EXISTS cockroach;
|
||||
GRANT ALL ON DATABASE example TO cockroach;
|
||||
```
|
||||
|
||||
When using the Migrate CLI we need to pass the database URL. Let's export it to a variable for convenience:
|
||||
```
|
||||
export COCKROACHDB_URL='cockroachdb://cockroach:@localhost:26257/example?sslmode=disable'
|
||||
```
|
||||
`sslmode=disable` means that the connection with our database will not be encrypted. This is needed to connect to an insecure node.
|
||||
|
||||
**NOTE:** Do not use COCKROACH_URL as a variable name here, it's already in use for discrete parameters and you may run into connection problems. For more info check out [docs](https://www.cockroachlabs.com/docs/stable/connection-parameters.html#connect-using-discrete-parameters).
|
||||
|
||||
You can find further description of database URLs [here](README.md#database-urls).
|
||||
|
||||
## Create migrations
|
||||
Let's create a table called `users`:
|
||||
```
|
||||
migrate create -ext sql -dir db/migrations -seq create_users_table
|
||||
```
|
||||
If there were no errors, we should have two files available under `db/migrations` folder:
|
||||
- 000001_create_users_table.down.sql
|
||||
- 000001_create_users_table.up.sql
|
||||
|
||||
Note the `sql` extension that we provided.
|
||||
|
||||
In the `.up.sql` file let's create the table:
|
||||
```
|
||||
CREATE TABLE IF NOT EXISTS example.users
|
||||
(
|
||||
user_id INT PRIMARY KEY,
|
||||
username VARCHAR (50) UNIQUE NOT NULL,
|
||||
password VARCHAR (50) NOT NULL,
|
||||
email VARCHAR (300) UNIQUE NOT NULL
|
||||
);
|
||||
```
|
||||
And in the `.down.sql` let's delete it:
|
||||
```
|
||||
DROP TABLE IF EXISTS example.users;
|
||||
```
|
||||
By adding `IF EXISTS/IF NOT EXISTS` we are making migrations idempotent - you can read more about idempotency in [getting started](/GETTING_STARTED.md#create-migrations)
|
||||
|
||||
## Run migrations
|
||||
```
|
||||
migrate -database ${COCKROACHDB_URL} -path db/migrations up
|
||||
```
|
||||
Let's check if the table was created properly by running `cockroach sql --insecure --host=localhost:26257 -e "show columns from example.users;"`.
|
||||
The output you are supposed to see:
|
||||
```
|
||||
column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
|
||||
+-------------+--------------+-------------+----------------+-----------------------+----------------------------------------------+-----------+
|
||||
user_id | INT8 | false | NULL | | {primary,users_username_key,users_email_key} | false
|
||||
username | VARCHAR(50) | false | NULL | | {users_username_key} | false
|
||||
password | VARCHAR(50) | false | NULL | | {} | false
|
||||
email | VARCHAR(300) | false | NULL | | {users_email_key} | false
|
||||
(4 rows)
|
||||
```
|
||||
Now let's check if running reverse migration also works:
|
||||
```
|
||||
migrate -database ${COCKROACHDB_URL} -path db/migrations down
|
||||
```
|
||||
Make sure to check if your database changed as expected in this case as well.
|
||||
|
||||
## Database transactions
|
||||
|
||||
To show database transactions usage, let's create another set of migrations by running:
|
||||
```
|
||||
migrate create -ext sql -dir db/migrations -seq add_mood_to_users
|
||||
```
|
||||
Again, it should create for us two migrations files:
|
||||
- 000002_add_mood_to_users.down.sql
|
||||
- 000002_add_mood_to_users.up.sql
|
||||
|
||||
In Cockroach, when we want our queries to be done in a transaction, we need to wrap it with `BEGIN` and `COMMIT` commands, similar to PostgreSQL.
|
||||
In our example, we are going to add a column to our database that can only accept enumerable values or NULL.
|
||||
Migration up:
|
||||
```
|
||||
BEGIN;
|
||||
|
||||
ALTER TABLE example.users ADD COLUMN mood STRING;
|
||||
ALTER TABLE example.users ADD CONSTRAINT check_mood CHECK (mood IN ('happy', 'sad', 'neutral'));
|
||||
|
||||
COMMIT;
|
||||
```
|
||||
Migration down:
|
||||
```
|
||||
ALTER TABLE example.users DROP COLUMN mood;
|
||||
```
|
||||
|
||||
Now we can run our new migration and check the database:
|
||||
```
|
||||
migrate -database ${COCKROACHDB_URL} -path db/migrations up
|
||||
cockroach sql --insecure --host=localhost:26257 -e "show columns from example.users;"
|
||||
```
|
||||
Expected output:
|
||||
```
|
||||
column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
|
||||
+-------------+--------------+-------------+----------------+-----------------------+----------------------------------------------+-----------+
|
||||
user_id | INT8 | false | NULL | | {primary,users_username_key,users_email_key} | false
|
||||
username | VARCHAR(50) | false | NULL | | {users_username_key} | false
|
||||
password | VARCHAR(50) | false | NULL | | {} | false
|
||||
email | VARCHAR(300) | false | NULL | | {users_email_key} | false
|
||||
mood | STRING | true | NULL | | {} | false
|
||||
(5 rows)
|
||||
```
|
||||
|
||||
## Optional: Run migrations within your Go app
|
||||
Here is a very simple app running migrations for the above configuration:
|
||||
```
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/cockroachdb"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m, err := migrate.New(
|
||||
"file://db/migrations",
|
||||
"cockroachdb://cockroach:@localhost:26257/example?sslmode=disable")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := m.Up(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
You can find details [here](README.md#use-in-your-go-project)
|
||||
+365
@@ -0,0 +1,365 @@
|
||||
package cockroachdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
nurl "net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/cockroachdb/cockroach-go/v2/crdb"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/lib/pq"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
func init() {
|
||||
db := CockroachDb{}
|
||||
database.Register("cockroach", &db)
|
||||
database.Register("cockroachdb", &db)
|
||||
database.Register("crdb-postgres", &db)
|
||||
}
|
||||
|
||||
// DefaultMigrationsTable is used when Config.MigrationsTable is empty.
var DefaultMigrationsTable = "schema_migrations"

// DefaultLockTable is used when Config.LockTable is empty.
var DefaultLockTable = "schema_lock"

var (
	// ErrNilConfig is returned by WithInstance when config is nil.
	ErrNilConfig = fmt.Errorf("no config")
	// ErrNoDatabaseName is returned when no database name is configured
	// and none can be read from the connection.
	ErrNoDatabaseName = fmt.Errorf("no database name")
)
|
||||
|
||||
// Config holds the driver settings. Zero-value fields are filled in with
// defaults by WithInstance.
type Config struct {
	MigrationsTable string // table storing (version, dirty); defaults to DefaultMigrationsTable
	LockTable       string // table emulating an advisory lock; defaults to DefaultLockTable
	ForceLock       bool   // acquire the lock even if a lock row already exists
	DatabaseName    string // read from the connection via current_database() when empty
}

// CockroachDb is the migrate database.Driver implementation for CockroachDB.
type CockroachDb struct {
	db       *sql.DB
	isLocked atomic.Bool // compare-and-swapped by Lock/Unlock

	// Open and WithInstance need to guarantee that config is never nil
	config *Config
}
|
||||
|
||||
// WithInstance wraps an existing *sql.DB in a migrate database.Driver.
// It pings the connection, fills empty Config fields with defaults (the
// passed-in config is mutated), and creates the lock and version tables
// if they do not yet exist. Returns ErrNilConfig when config is nil.
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
	if config == nil {
		return nil, ErrNilConfig
	}

	if err := instance.Ping(); err != nil {
		return nil, err
	}

	// Fall back to the connection's current database when the caller did
	// not name one explicitly.
	if config.DatabaseName == "" {
		query := `SELECT current_database()`
		var databaseName string
		if err := instance.QueryRow(query).Scan(&databaseName); err != nil {
			return nil, &database.Error{OrigErr: err, Query: []byte(query)}
		}

		if len(databaseName) == 0 {
			return nil, ErrNoDatabaseName
		}

		config.DatabaseName = databaseName
	}

	if len(config.MigrationsTable) == 0 {
		config.MigrationsTable = DefaultMigrationsTable
	}

	if len(config.LockTable) == 0 {
		config.LockTable = DefaultLockTable
	}

	px := &CockroachDb{
		db:     instance,
		config: config,
	}

	// ensureVersionTable is a locking operation, so we need to ensureLockTable before we ensureVersionTable.
	if err := px.ensureLockTable(); err != nil {
		return nil, err
	}

	if err := px.ensureVersionTable(); err != nil {
		return nil, err
	}

	return px, nil
}
|
||||
|
||||
// Open implements database.Driver. It parses the migrate URL, rewrites the
// scheme to "postgres" (CockroachDB speaks the postgres wire protocol),
// extracts the x-* custom query parameters, and delegates to WithInstance.
func (c *CockroachDb) Open(url string) (database.Driver, error) {
	purl, err := nurl.Parse(url)
	if err != nil {
		return nil, err
	}

	// As Cockroach uses the postgres protocol, and 'postgres' is already a registered database, we need to replace the
	// connect prefix, with the actual protocol, so that the library can differentiate between the implementations
	re := regexp.MustCompile("^(cockroach(db)?|crdb-postgres)")
	connectString := re.ReplaceAllString(migrate.FilterCustomQuery(purl).String(), "postgres")

	db, err := sql.Open("postgres", connectString)
	if err != nil {
		return nil, err
	}

	migrationsTable := purl.Query().Get("x-migrations-table")
	if len(migrationsTable) == 0 {
		migrationsTable = DefaultMigrationsTable
	}

	lockTable := purl.Query().Get("x-lock-table")
	if len(lockTable) == 0 {
		lockTable = DefaultLockTable
	}

	// An absent or unparseable x-force-lock defaults to false.
	forceLockQuery := purl.Query().Get("x-force-lock")
	forceLock, err := strconv.ParseBool(forceLockQuery)
	if err != nil {
		forceLock = false
	}

	px, err := WithInstance(db, &Config{
		DatabaseName:    purl.Path,
		MigrationsTable: migrationsTable,
		LockTable:       lockTable,
		ForceLock:       forceLock,
	})
	if err != nil {
		return nil, err
	}

	return px, nil
}
|
||||
|
||||
// Close closes the underlying database handle.
func (c *CockroachDb) Close() error {
	return c.db.Close()
}
|
||||
|
||||
// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed
// See: https://github.com/cockroachdb/cockroach/issues/13546
//
// Lock acquires the migration lock by inserting a row keyed by an advisory
// lock id derived from the database name. The check-then-insert runs inside
// a CRDB retryable transaction; the in-process isLocked flag is flipped via
// compare-and-swap and restored on error.
func (c *CockroachDb) Lock() error {
	return database.CasRestoreOnErr(&c.isLocked, false, true, database.ErrLocked, func() (err error) {
		return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) (err error) {
			aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName)
			if err != nil {
				return err
			}

			query := "SELECT * FROM " + c.config.LockTable + " WHERE lock_id = $1"
			rows, err := tx.Query(query, aid)
			if err != nil {
				return database.Error{OrigErr: err, Err: "failed to fetch migration lock", Query: []byte(query)}
			}
			defer func() {
				if errClose := rows.Close(); errClose != nil {
					err = multierror.Append(err, errClose)
				}
			}()

			// If row exists at all, lock is present
			locked := rows.Next()
			if locked && !c.config.ForceLock {
				return database.ErrLocked
			}

			// NOTE(review): when ForceLock is set and a lock row already
			// exists, this INSERT targets the same primary key — verify the
			// force-lock path against a held lock.
			query = "INSERT INTO " + c.config.LockTable + " (lock_id) VALUES ($1)"
			if _, err := tx.Exec(query, aid); err != nil {
				return database.Error{OrigErr: err, Err: "failed to set migration lock", Query: []byte(query)}
			}

			return nil
		})
	})
}
|
||||
|
||||
// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed
// See: https://github.com/cockroachdb/cockroach/issues/13546
//
// Unlock releases the migration lock by deleting the advisory-lock row
// inserted by Lock. A missing lock table (dropped schema) counts as a
// valid unlocked state rather than an error.
func (c *CockroachDb) Unlock() error {
	return database.CasRestoreOnErr(&c.isLocked, true, false, database.ErrNotLocked, func() (err error) {
		aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName)
		if err != nil {
			return err
		}

		// In the event of an implementation (non-migration) error, it is possible for the lock to not be released. Until
		// a better locking mechanism is added, a manual purging of the lock table may be required in such circumstances
		query := "DELETE FROM " + c.config.LockTable + " WHERE lock_id = $1"
		if _, err := c.db.Exec(query, aid); err != nil {
			if e, ok := err.(*pq.Error); ok {
				// 42P01 is "UndefinedTableError" in CockroachDB
				// https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go
				if e.Code == "42P01" {
					// On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema
					return nil
				}
			}

			return database.Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)}
		}

		return nil
	})
}
|
||||
|
||||
func (c *CockroachDb) Run(migration io.Reader) error {
|
||||
migr, err := io.ReadAll(migration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// run migration
|
||||
query := string(migr[:])
|
||||
if _, err := c.db.Exec(query); err != nil {
|
||||
return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetVersion replaces the stored migration version atomically: the
// migrations table is emptied and a single (version, dirty) row is written
// inside one CRDB retryable transaction.
func (c *CockroachDb) SetVersion(version int, dirty bool) error {
	return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error {
		if _, err := tx.Exec(`DELETE FROM "` + c.config.MigrationsTable + `"`); err != nil {
			return err
		}

		// Also re-write the schema version for nil dirty versions to prevent
		// empty schema version for failed down migration on the first migration
		// See: https://github.com/golang-migrate/migrate/issues/330
		if version >= 0 || (version == database.NilVersion && dirty) {
			if _, err := tx.Exec(`INSERT INTO "`+c.config.MigrationsTable+`" (version, dirty) VALUES ($1, $2)`, version, dirty); err != nil {
				return err
			}
		}

		return nil
	})
}
|
||||
|
||||
// Version returns the currently applied migration version and dirty flag.
// Both an empty migrations table and a missing migrations table report
// NilVersion with no error.
func (c *CockroachDb) Version() (version int, dirty bool, err error) {
	query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1`
	err = c.db.QueryRow(query).Scan(&version, &dirty)

	switch {
	case err == sql.ErrNoRows:
		// Table exists but holds no row: no migration applied yet.
		return database.NilVersion, false, nil

	case err != nil:
		if e, ok := err.(*pq.Error); ok {
			// 42P01 is "UndefinedTableError" in CockroachDB
			// https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go
			if e.Code == "42P01" {
				// Migrations table not created yet: treat as no version.
				return database.NilVersion, false, nil
			}
		}
		return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}

	default:
		return version, dirty, nil
	}
}
|
||||
|
||||
// Drop deletes every table in the current schema, including the migrations
// and lock tables. Names are collected first so the result set is fully
// consumed and closed before any DROP statement executes.
func (c *CockroachDb) Drop() (err error) {
	// select all tables in current schema
	query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())`
	tables, err := c.db.Query(query)
	if err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	defer func() {
		if errClose := tables.Close(); errClose != nil {
			err = multierror.Append(err, errClose)
		}
	}()

	// delete one table after another
	tableNames := make([]string, 0)
	for tables.Next() {
		var tableName string
		if err := tables.Scan(&tableName); err != nil {
			return err
		}
		if len(tableName) > 0 {
			tableNames = append(tableNames, tableName)
		}
	}
	if err := tables.Err(); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	if len(tableNames) > 0 {
		// delete one by one ...
		for _, t := range tableNames {
			// CASCADE removes dependent objects (views, FK constraints).
			query = `DROP TABLE IF EXISTS ` + t + ` CASCADE`
			if _, err := c.db.Exec(query); err != nil {
				return &database.Error{OrigErr: err, Query: []byte(query)}
			}
		}
	}

	return nil
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
// Note that this function locks the database, which deviates from the usual
// convention of "caller locks" in the CockroachDb type.
func (c *CockroachDb) ensureVersionTable() (err error) {
	if err = c.Lock(); err != nil {
		return err
	}

	// Always release the lock; combine an unlock failure with any earlier
	// error rather than masking either one.
	defer func() {
		if e := c.Unlock(); e != nil {
			if err == nil {
				err = e
			} else {
				err = multierror.Append(err, e)
			}
		}
	}()

	// check if migration table exists
	var count int
	query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1`
	if err := c.db.QueryRow(query, c.config.MigrationsTable).Scan(&count); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	if count == 1 {
		return nil
	}

	// if not, create the empty migration table
	query = `CREATE TABLE "` + c.config.MigrationsTable + `" (version INT NOT NULL PRIMARY KEY, dirty BOOL NOT NULL)`
	if _, err := c.db.Exec(query); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	return nil
}
|
||||
|
||||
func (c *CockroachDb) ensureLockTable() error {
|
||||
// check if lock table exists
|
||||
var count int
|
||||
query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1`
|
||||
if err := c.db.QueryRow(query, c.config.LockTable).Scan(&count); err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
if count == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if not, create the empty lock table
|
||||
query = `CREATE TABLE "` + c.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)`
|
||||
if _, err := c.db.Exec(query); err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
+174
@@ -0,0 +1,174 @@
|
||||
package cockroachdb
|
||||
|
||||
// error codes https://github.com/lib/pq/blob/master/error.go
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/dhui/dktest"
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
import (
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
// defaultPort is the SQL port CockroachDB listens on inside the container.
const defaultPort = 26257

var (
	// opts starts each container as an insecure single node and waits for
	// isReady before tests run.
	opts = dktest.Options{Cmd: []string{"start", "--insecure"}, PortRequired: true, ReadyFunc: isReady}
	// Released versions: https://www.cockroachlabs.com/docs/releases/
	specs = []dktesting.ContainerSpec{
		{ImageName: "cockroachdb/cockroach:v1.0.7", Options: opts},
		{ImageName: "cockroachdb/cockroach:v1.1.9", Options: opts},
		{ImageName: "cockroachdb/cockroach:v2.0.7", Options: opts},
		{ImageName: "cockroachdb/cockroach:v2.1.3", Options: opts},
	}
)
|
||||
|
||||
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
|
||||
ip, port, err := c.Port(defaultPort)
|
||||
if err != nil {
|
||||
log.Println("port error:", err)
|
||||
return false
|
||||
}
|
||||
|
||||
db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", ip, port))
|
||||
if err != nil {
|
||||
log.Println("open error:", err)
|
||||
return false
|
||||
}
|
||||
if err := db.PingContext(ctx); err != nil {
|
||||
log.Println("ping error:", err)
|
||||
return false
|
||||
}
|
||||
if err := db.Close(); err != nil {
|
||||
log.Println("close error:", err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// createDB connects to the container as root and creates the "migrate"
// database used by the tests. Any failure aborts the calling test.
func createDB(t *testing.T, c dktest.ContainerInfo) {
	ip, port, err := c.Port(defaultPort)
	if err != nil {
		t.Fatal(err)
	}

	db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", ip, port))
	if err != nil {
		t.Fatal(err)
	}
	if err = db.Ping(); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			t.Error(err)
		}
	}()

	if _, err = db.Exec("CREATE DATABASE migrate"); err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// Test runs the generic driver conformance suite against every supported
// CockroachDB container version.
func Test(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, ci dktest.ContainerInfo) {
		createDB(t, ci)

		ip, port, err := ci.Port(26257)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", ip, port)
		c := &CockroachDb{}
		d, err := c.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		dt.Test(t, d, []byte("SELECT 1"))
	})
}
|
||||
|
||||
// TestMigrate runs the migrate up/down test suite using the example
// migrations on disk against every supported container version.
func TestMigrate(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, ci dktest.ContainerInfo) {
		createDB(t, ci)

		ip, port, err := ci.Port(26257)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", ip, port)
		c := &CockroachDb{}
		d, err := c.Open(addr)
		if err != nil {
			t.Fatal(err)
		}

		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "migrate", d)
		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)
	})
}
|
||||
|
||||
// TestMultiStatement verifies that Run executes a migration containing
// multiple semicolon-separated statements, by checking that the second
// table in the batch was actually created.
func TestMultiStatement(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, ci dktest.ContainerInfo) {
		createDB(t, ci)

		ip, port, err := ci.Port(26257)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", ip, port)
		c := &CockroachDb{}
		d, err := c.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		if err := d.Run(strings.NewReader("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);")); err != nil {
			t.Fatalf("expected err to be nil, got %v", err)
		}

		// make sure second table exists
		var exists bool
		if err := d.(*CockroachDb).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil {
			t.Fatal(err)
		}
		if !exists {
			t.Fatalf("expected table bar to exist")
		}
	})
}
|
||||
|
||||
func TestFilterCustomQuery(t *testing.T) {
|
||||
dktesting.ParallelTest(t, specs, func(t *testing.T, ci dktest.ContainerInfo) {
|
||||
createDB(t, ci)
|
||||
|
||||
ip, port, err := ci.Port(26257)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable&x-custom=foobar", ip, port)
|
||||
c := &CockroachDb{}
|
||||
_, err = c.Open(addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS users;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE users (
|
||||
user_id INT UNIQUE,
|
||||
name STRING(40),
|
||||
email STRING(40)
|
||||
);
|
||||
+1
@@ -0,0 +1 @@
|
||||
ALTER TABLE users DROP COLUMN IF EXISTS city;
|
||||
+1
@@ -0,0 +1 @@
|
||||
ALTER TABLE users ADD COLUMN city TEXT;
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP INDEX IF EXISTS users_email_index;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS users_email_index ON users (email);
|
||||
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS books;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE books (
|
||||
user_id INT,
|
||||
name STRING(40),
|
||||
author STRING(40)
|
||||
);
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS movies;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE movies (
|
||||
user_id INT,
|
||||
name STRING(40),
|
||||
director STRING(40)
|
||||
);
|
||||
+1
@@ -0,0 +1 @@
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
+1
@@ -0,0 +1 @@
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
+1
@@ -0,0 +1 @@
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
+1
@@ -0,0 +1 @@
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
@@ -0,0 +1,123 @@
|
||||
// Package database provides the Driver interface.
|
||||
// All database drivers must implement this interface, register themselves,
|
||||
// optionally provide a `WithInstance` function and pass the tests
|
||||
// in package database/testing.
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
iurl "github.com/golang-migrate/migrate/v4/internal/url"
|
||||
)
|
||||
|
||||
var (
	// ErrLocked is returned by a driver's Lock when another migration
	// process already holds the database lock.
	ErrLocked = fmt.Errorf("can't acquire lock")
	// ErrNotLocked is returned by a driver's Unlock when no lock is held.
	ErrNotLocked = fmt.Errorf("can't unlock, as not currently locked")
)

// NilVersion means no migration has been applied yet.
const NilVersion int = -1

// driversMu guards the drivers registry below.
var driversMu sync.RWMutex

// drivers maps a registered scheme name to its Driver implementation.
var drivers = make(map[string]Driver)
|
||||
|
||||
// Driver is the interface every database driver must implement.
//
// How to implement a database driver?
//  1. Implement this interface.
//  2. Optionally, add a function named `WithInstance`.
//     This function should accept an existing DB instance and a Config{} struct
//     and return a driver instance.
//  3. Add a test that calls database/testing.go:Test()
//  4. Add own tests for Open(), WithInstance() (when provided) and Close().
//     All other functions are tested by tests in database/testing.
//     Saves you some time and makes sure all database drivers behave the same way.
//  5. Call Register in init().
//  6. Create a internal/cli/build_<driver-name>.go file
//  7. Add driver name in 'DATABASE' variable in Makefile
//
// Guidelines:
//   - Don't try to correct user input. Don't assume things.
//     When in doubt, return an error and explain the situation to the user.
//   - All configuration input must come from the URL string in func Open()
//     or the Config{} struct in WithInstance. Don't os.Getenv().
type Driver interface {
	// Open returns a new driver instance configured with parameters
	// coming from the URL string. Migrate will call this function
	// only once per instance.
	Open(url string) (Driver, error)

	// Close closes the underlying database instance managed by the driver.
	// Migrate will call this function only once per instance.
	Close() error

	// Lock should acquire a database lock so that only one migration process
	// can run at a time. Migrate will call this function before Run is called.
	// If the implementation can't provide this functionality, return nil.
	// Return database.ErrLocked if database is already locked.
	Lock() error

	// Unlock should release the lock. Migrate will call this function after
	// all migrations have been run.
	Unlock() error

	// Run applies a migration to the database. migration is guaranteed to be not nil.
	Run(migration io.Reader) error

	// SetVersion saves version and dirty state.
	// Migrate will call this function before and after each call to Run.
	// version must be >= -1. -1 means NilVersion.
	SetVersion(version int, dirty bool) error

	// Version returns the currently active version and if the database is dirty.
	// When no migration has been applied, it must return version -1.
	// Dirty means, a previous migration failed and user interaction is required.
	Version() (version int, dirty bool, err error)

	// Drop deletes everything in the database.
	// Note that this is a breaking action, a new call to Open() is necessary to
	// ensure subsequent calls work as expected.
	Drop() error
}
|
||||
|
||||
// Open returns a new driver instance.
|
||||
func Open(url string) (Driver, error) {
|
||||
scheme, err := iurl.SchemeFromURL(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driversMu.RLock()
|
||||
d, ok := drivers[scheme]
|
||||
driversMu.RUnlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", scheme)
|
||||
}
|
||||
|
||||
return d.Open(url)
|
||||
}
|
||||
|
||||
// Register globally registers a driver.
|
||||
func Register(name string, driver Driver) {
|
||||
driversMu.Lock()
|
||||
defer driversMu.Unlock()
|
||||
if driver == nil {
|
||||
panic("Register driver is nil")
|
||||
}
|
||||
if _, dup := drivers[name]; dup {
|
||||
panic("Register called twice for driver " + name)
|
||||
}
|
||||
drivers[name] = driver
|
||||
}
|
||||
|
||||
// List lists the registered drivers
|
||||
func List() []string {
|
||||
driversMu.RLock()
|
||||
defer driversMu.RUnlock()
|
||||
names := make([]string, 0, len(drivers))
|
||||
for n := range drivers {
|
||||
names = append(names, n)
|
||||
}
|
||||
return names
|
||||
}
|
||||
@@ -0,0 +1,115 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// ExampleDriver is a placeholder example; the real reference
// implementation lives in database/stub.
func ExampleDriver() {
	// see database/stub for an example

	// database/stub/stub.go has the driver implementation
	// database/stub/stub_test.go runs database/testing/test.go:Test
}
|
||||
|
||||
// Using database/stub here is not possible as it
// results in an import cycle.
//
// mockDriver is a minimal no-op Driver used to exercise Register and Open.
type mockDriver struct {
	url string // the DSN this instance was opened with
}

// Open returns a fresh mockDriver that records the DSN it was given.
func (m *mockDriver) Open(url string) (Driver, error) {
	return &mockDriver{
		url: url,
	}, nil
}

// Close is a no-op.
func (m *mockDriver) Close() error {
	return nil
}

// Lock is a no-op.
func (m *mockDriver) Lock() error {
	return nil
}

// Unlock is a no-op.
func (m *mockDriver) Unlock() error {
	return nil
}

// Run discards the migration and reports success.
func (m *mockDriver) Run(migration io.Reader) error {
	return nil
}

// SetVersion is a no-op.
func (m *mockDriver) SetVersion(version int, dirty bool) error {
	return nil
}

// Version always reports version 0, not dirty.
func (m *mockDriver) Version() (version int, dirty bool, err error) {
	return 0, false, nil
}

// Drop is a no-op.
func (m *mockDriver) Drop() error {
	return nil
}
|
||||
|
||||
func TestRegisterTwice(t *testing.T) {
|
||||
Register("mock", &mockDriver{})
|
||||
|
||||
var err interface{}
|
||||
func() {
|
||||
defer func() {
|
||||
err = recover()
|
||||
}()
|
||||
Register("mock", &mockDriver{})
|
||||
}()
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("expected a panic when calling Register twice")
|
||||
}
|
||||
}
|
||||
|
||||
// TestOpen exercises database.Open for both a registered scheme and an
// unknown one, checking the returned driver type and the error path.
func TestOpen(t *testing.T) {
	// Make sure the driver is registered.
	// But if the previous test already registered it just ignore the panic.
	// If we don't do this it will be impossible to run this test standalone.
	func() {
		defer func() {
			_ = recover()
		}()
		Register("mock", &mockDriver{})
	}()

	// err == true means Open is expected to fail for this DSN.
	cases := []struct {
		url string
		err bool
	}{
		{
			"mock://user:pass@tcp(host:1337)/db",
			false,
		},
		{
			"unknown://bla",
			true,
		},
	}

	for _, c := range cases {
		t.Run(c.url, func(t *testing.T) {
			d, err := Open(c.url)

			if err == nil {
				if c.err {
					t.Fatal("expected an error for an unknown driver")
				} else {
					// mockDriver.Open echoes the DSN back; verify both the
					// concrete type and the stored URL.
					if md, ok := d.(*mockDriver); !ok {
						t.Fatalf("expected *mockDriver got %T", d)
					} else if md.url != c.url {
						t.Fatalf("expected %q got %q", c.url, md.url)
					}
				}
			} else if !c.err {
				t.Fatalf("did not expect %q", err)
			}
		})
	}
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error should be used for errors involving queries ran against the database
type Error struct {
	// Optional: the line number
	Line uint

	// Query is a query excerpt
	Query []byte

	// Err is a useful/helping error message for humans
	Err string

	// OrigErr is the underlying error
	OrigErr error
}

// Error renders the error for humans. When a human-readable message is
// set it leads and the underlying error is appended as details;
// otherwise the underlying error leads.
func (e Error) Error() string {
	if len(e.Err) > 0 {
		return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr)
	}
	return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query)
}
|
||||
@@ -0,0 +1,12 @@
|
||||
# firebird
|
||||
|
||||
`firebirdsql://user:password@servername[:port_number]/database_name_or_file[?param1=value1[&param2=value2]...]`
|
||||
|
||||
| URL Query | WithInstance Config | Description |
|
||||
|------------|---------------------|-------------|
|
||||
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
|
||||
| `auth_plugin_name` | | Authentication plugin name. Srp256/Srp/Legacy_Auth are available. (default is Srp) |
|
||||
| `column_name_to_lower` | | Force column name to lower. (default is false) |
|
||||
| `role` | | Role name |
|
||||
| `tzname` | | Time Zone name. (For Firebird 4.0+) |
|
||||
| `wire_crypt` | | Enable wire data encryption or not. For Firebird 3.0+ (default is true) |
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE users;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE users (
|
||||
user_id integer unique,
|
||||
name varchar(40),
|
||||
email varchar(40)
|
||||
);
|
||||
+1
@@ -0,0 +1 @@
|
||||
ALTER TABLE users DROP city;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
ALTER TABLE users ADD city varchar(100);
|
||||
|
||||
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP INDEX users_email_index;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
CREATE UNIQUE INDEX users_email_index ON users (email);
|
||||
|
||||
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere.
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE books;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE books (
|
||||
user_id integer,
|
||||
name varchar(40),
|
||||
author varchar(40)
|
||||
);
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE movies;
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE movies (
|
||||
user_id integer,
|
||||
name varchar(40),
|
||||
director varchar(40)
|
||||
);
|
||||
+259
@@ -0,0 +1,259 @@
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package firebird
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
nurl "net/url"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
_ "github.com/nakagami/firebirdsql"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
// init registers the Firebird driver under both URL schemes migrate
// accepts for Firebird connection strings.
func init() {
	db := Firebird{}
	database.Register("firebird", &db)
	database.Register("firebirdsql", &db)
}
|
||||
|
||||
// DefaultMigrationsTable is used when Config.MigrationsTable is empty.
var DefaultMigrationsTable = "schema_migrations"

var (
	// ErrNilConfig is returned by WithInstance when config is nil.
	ErrNilConfig = fmt.Errorf("no config")
)

// Config holds the settings used to construct a Firebird driver.
type Config struct {
	DatabaseName    string
	MigrationsTable string
}

// Firebird implements database.Driver for Firebird databases.
type Firebird struct {
	// Locking and unlocking need to use the same connection
	conn     *sql.Conn
	db       *sql.DB
	isLocked atomic.Bool // in-process lock flag; see Lock/Unlock

	// Open and WithInstance need to guarantee that config is never nil
	config *Config
}
|
||||
|
||||
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
|
||||
if config == nil {
|
||||
return nil, ErrNilConfig
|
||||
}
|
||||
|
||||
if err := instance.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(config.MigrationsTable) == 0 {
|
||||
config.MigrationsTable = DefaultMigrationsTable
|
||||
}
|
||||
|
||||
conn, err := instance.Conn(context.Background())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fb := &Firebird{
|
||||
conn: conn,
|
||||
db: instance,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if err := fb.ensureVersionTable(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fb, nil
|
||||
}
|
||||
|
||||
func (f *Firebird) Open(dsn string) (database.Driver, error) {
|
||||
purl, err := nurl.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("firebirdsql", migrate.FilterCustomQuery(purl).String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
px, err := WithInstance(db, &Config{
|
||||
MigrationsTable: purl.Query().Get("x-migrations-table"),
|
||||
DatabaseName: purl.Path,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return px, nil
|
||||
}
|
||||
|
||||
// Close closes both the pinned connection and the underlying DB handle.
// If either close fails, the two errors are folded into one error value.
func (f *Firebird) Close() error {
	connErr := f.conn.Close()
	dbErr := f.db.Close()
	if connErr != nil || dbErr != nil {
		return fmt.Errorf("conn: %v, db: %v", connErr, dbErr)
	}
	return nil
}
|
||||
|
||||
// Lock flips the in-process lock flag. Note this only guards against
// concurrent use of this driver instance — no database-level lock is
// taken. Returns database.ErrLocked if the flag is already set.
func (f *Firebird) Lock() error {
	if !f.isLocked.CAS(false, true) {
		return database.ErrLocked
	}
	return nil
}
|
||||
|
||||
// Unlock clears the in-process lock flag set by Lock. Returns
// database.ErrNotLocked if the flag was not set.
func (f *Firebird) Unlock() error {
	if !f.isLocked.CAS(true, false) {
		return database.ErrNotLocked
	}
	return nil
}
|
||||
|
||||
func (f *Firebird) Run(migration io.Reader) error {
|
||||
migr, err := io.ReadAll(migration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// run migration
|
||||
query := string(migr[:])
|
||||
if _, err := f.conn.ExecContext(context.Background(), query); err != nil {
|
||||
return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetVersion replaces the contents of the migrations table with a single
// row holding version and the dirty flag (stored as 1/0 via btoi).
func (f *Firebird) SetVersion(version int, dirty bool) error {
	// Always re-write the schema version to prevent empty schema version
	// for failed down migration on the first migration
	// See: https://github.com/golang-migrate/migrate/issues/330

	// TODO: parameterize this SQL statement
	// https://firebirdsql.org/refdocs/langrefupd20-execblock.html
	// VALUES (?, ?) doesn't work
	// NOTE(review): the table name and values are interpolated into the
	// SQL; assumes MigrationsTable comes from trusted configuration — confirm.
	query := fmt.Sprintf(`EXECUTE BLOCK AS BEGIN
		DELETE FROM "%v";
		INSERT INTO "%v" (version, dirty) VALUES (%v, %v);
	END;`,
		f.config.MigrationsTable, f.config.MigrationsTable, version, btoi(dirty))

	if _, err := f.conn.ExecContext(context.Background(), query); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	return nil
}
|
||||
|
||||
// Version reads the single row of the migrations table. When the table
// is empty it reports database.NilVersion with no error.
func (f *Firebird) Version() (version int, dirty bool, err error) {
	var d int // dirty is stored as a smallint; converted via itob
	query := fmt.Sprintf(`SELECT FIRST 1 version, dirty FROM "%v"`, f.config.MigrationsTable)
	err = f.conn.QueryRowContext(context.Background(), query).Scan(&version, &d)
	switch {
	case err == sql.ErrNoRows:
		return database.NilVersion, false, nil
	case err != nil:
		return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}

	default:
		return version, itob(d), nil
	}
}
|
||||
|
||||
func (f *Firebird) Drop() (err error) {
|
||||
// select all tables
|
||||
query := `SELECT rdb$relation_name FROM rdb$relations WHERE rdb$view_blr IS NULL AND (rdb$system_flag IS NULL OR rdb$system_flag = 0);`
|
||||
tables, err := f.conn.QueryContext(context.Background(), query)
|
||||
if err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
defer func() {
|
||||
if errClose := tables.Close(); errClose != nil {
|
||||
err = multierror.Append(err, errClose)
|
||||
}
|
||||
}()
|
||||
|
||||
// delete one table after another
|
||||
tableNames := make([]string, 0)
|
||||
for tables.Next() {
|
||||
var tableName string
|
||||
if err := tables.Scan(&tableName); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(tableName) > 0 {
|
||||
tableNames = append(tableNames, tableName)
|
||||
}
|
||||
}
|
||||
if err := tables.Err(); err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
|
||||
// delete one by one ...
|
||||
for _, t := range tableNames {
|
||||
query := fmt.Sprintf(`EXECUTE BLOCK AS BEGIN
|
||||
if (not exists(select 1 from rdb$relations where rdb$relation_name = '%v')) then
|
||||
execute statement 'drop table "%v"';
|
||||
END;`,
|
||||
t, t)
|
||||
|
||||
if _, err := f.conn.ExecContext(context.Background(), query); err != nil {
|
||||
return &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
// It takes the driver lock for the duration of the check-and-create, and
// folds any Unlock failure into the returned error.
func (f *Firebird) ensureVersionTable() (err error) {
	if err = f.Lock(); err != nil {
		return err
	}

	defer func() {
		if e := f.Unlock(); e != nil {
			if err == nil {
				err = e
			} else {
				err = multierror.Append(err, e)
			}
		}
	}()

	// create-if-not-exists via EXECUTE BLOCK: Firebird has no
	// CREATE TABLE IF NOT EXISTS, so existence is checked in rdb$relations.
	query := fmt.Sprintf(`EXECUTE BLOCK AS BEGIN
		if (not exists(select 1 from rdb$relations where rdb$relation_name = '%v')) then
			execute statement 'create table "%v" (version bigint not null primary key, dirty smallint not null)';
	END;`,
		f.config.MigrationsTable, f.config.MigrationsTable)

	if _, err = f.conn.ExecContext(context.Background(), query); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	return nil
}
|
||||
|
||||
// btoi maps a boolean onto its stored integer form: true -> 1, false -> 0.
func btoi(v bool) int {
	result := 0
	if v {
		result = 1
	}
	return result
}
|
||||
|
||||
// itob reports whether the stored integer flag is non-zero.
func itob(v int) bool {
	return !(v == 0)
}
|
||||
+226
@@ -0,0 +1,226 @@
|
||||
package firebird
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
sqldriver "database/sql/driver"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/dhui/dktest"
|
||||
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
|
||||
_ "github.com/nakagami/firebirdsql"
|
||||
)
|
||||
|
||||
// Credentials and database name for the dockerized Firebird test server.
const (
	user     = "test_user"
	password = "123456"
	dbName   = "test.fdb"
)

var (
	// opts configures the Firebird test container; isReady gates the tests
	// until the server accepts connections.
	opts = dktest.Options{
		PortRequired: true,
		ReadyFunc:    isReady,
		Env: map[string]string{
			"FIREBIRD_DATABASE": dbName,
			"FIREBIRD_USER":     user,
			"FIREBIRD_PASSWORD": password,
		},
	}
	// specs lists the Firebird versions every test runs against.
	specs = []dktesting.ContainerSpec{
		{ImageName: "jacobalberty/firebird:2.5-ss", Options: opts},
		{ImageName: "jacobalberty/firebird:3.0", Options: opts},
	}
)
||||
|
||||
// fbConnectionString builds a firebird:// DSN for the test container,
// pointing at the image's data directory (/firebird/data).
func fbConnectionString(host, port string) string {
	//firebird://user:password@servername[:port_number]/database_name_or_file[?params1=value1[&param2=value2]...]
	return fmt.Sprintf("firebird://%s:%s@%s:%s//firebird/data/%s", user, password, host, port, dbName)
}
||||
|
||||
// isReady reports whether the Firebird container accepts connections.
// Used as the dktest ReadyFunc; every failure path returns false.
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
	ip, port, err := c.FirstPort()
	if err != nil {
		return false
	}

	db, err := sql.Open("firebirdsql", fbConnectionString(ip, port))
	if err != nil {
		log.Println("open error:", err)
		return false
	}
	defer func() {
		if err := db.Close(); err != nil {
			log.Println("close error:", err)
		}
	}()
	if err = db.PingContext(ctx); err != nil {
		// Connection-level errors are expected while the container boots;
		// anything else is logged for diagnosis. Either way: not ready yet.
		switch err {
		case sqldriver.ErrBadConn, io.EOF:
			return false
		default:
			log.Println(err)
		}
		return false
	}

	return true
}
|
||||
|
||||
// Test runs the shared database/testing conformance suite against each
// Firebird container spec.
func Test(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.FirstPort()
		if err != nil {
			t.Fatal(err)
		}

		addr := fbConnectionString(ip, port)
		p := &Firebird{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
		dt.Test(t, d, []byte("SELECT Count(*) FROM rdb$relations"))
	})
}
|
||||
|
||||
// TestMigrate runs the shared migration suite using the example
// migrations shipped with this driver.
func TestMigrate(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.FirstPort()
		if err != nil {
			t.Fatal(err)
		}

		addr := fbConnectionString(ip, port)
		p := &Firebird{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "firebirdsql", d)
		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)
	})
}
|
||||
|
||||
// TestErrorParsing runs an intentionally invalid statement and checks
// the exact database.Error message produced from the server response.
func TestErrorParsing(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.FirstPort()
		if err != nil {
			t.Fatal(err)
		}

		addr := fbConnectionString(ip, port)
		p := &Firebird{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		// The multiline tail is the raw Firebird server error text.
		wantErr := `migration failed in line 0: CREATE TABLEE foo (foo varchar(40)); (details: Dynamic SQL Error
SQL error code = -104
Token unknown - line 1, column 8
TABLEE
)`

		if err := d.Run(strings.NewReader("CREATE TABLEE foo (foo varchar(40));")); err == nil {
			t.Fatal("expected err but got nil")
		} else if err.Error() != wantErr {
			msg := err.Error()
			t.Fatalf("expected '%s' but got '%s'", wantErr, msg)
		}
	})
}
|
||||
|
||||
// TestFilterCustomQuery checks that a DSN carrying extra query
// parameters (including migrate's x-* ones) still opens successfully,
// i.e. the custom parameters are filtered out before dialing.
func TestFilterCustomQuery(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.FirstPort()
		if err != nil {
			t.Fatal(err)
		}

		addr := fbConnectionString(ip, port) + "?sslmode=disable&x-custom=foobar"
		p := &Firebird{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
	})
}
|
||||
|
||||
// Test_Lock verifies that Lock/Unlock can be cycled repeatedly on the
// same driver instance without error.
func Test_Lock(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.FirstPort()
		if err != nil {
			t.Fatal(err)
		}

		addr := fbConnectionString(ip, port)
		p := &Firebird{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		dt.Test(t, d, []byte("SELECT Count(*) FROM rdb$relations"))

		ps := d.(*Firebird)

		err = ps.Lock()
		if err != nil {
			t.Fatal(err)
		}

		err = ps.Unlock()
		if err != nil {
			t.Fatal(err)
		}

		err = ps.Lock()
		if err != nil {
			t.Fatal(err)
		}

		err = ps.Unlock()
		if err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
@@ -0,0 +1,24 @@
|
||||
# MongoDB
|
||||
|
||||
* Driver work with mongo through [db.runCommands](https://docs.mongodb.com/manual/reference/command/)
|
||||
* Migrations are in JSON format: each file contains an array of commands for `db.runCommand`. Every command is executed in a separate request to the database
|
||||
* All keys have to be in quotes `"`
|
||||
* [Examples](./examples)
|
||||
|
||||
# Usage
|
||||
|
||||
`mongodb://user:password@host:port/dbname?query` (`mongodb+srv://` also works, but behaves a bit differently. See [docs](https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format) for more information)
|
||||
|
||||
| URL Query | WithInstance Config | Description |
|
||||
|------------|---------------------|-------------|
|
||||
| `x-migrations-collection` | `MigrationsCollection` | Name of the migrations collection |
|
||||
| `x-transaction-mode` | `TransactionMode` | If set to `true` wrap commands in [transaction](https://docs.mongodb.com/manual/core/transactions). Available only for replica set. Driver is using [strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool) for parsing|
|
||||
| `x-advisory-locking` | `true` | Feature flag for advisory locking; if set to false, advisory locking is disabled |
|
||||
| `x-advisory-lock-collection` | `migrate_advisory_lock` | The name of the collection to use for advisory locking.|
|
||||
| `x-advisory-lock-timeout` | `15` | The max time in seconds that migrate will wait to acquire a lock before failing. |
|
||||
| `x-advisory-lock-timeout-interval` | `10` | The max time in seconds between attempts to acquire the advisory lock, the lock is attempted to be acquired using an exponential backoff algorithm. |
|
||||
| `dbname` | `DatabaseName` | The name of the database to connect to |
|
||||
| `user` | | The user to sign in as. Can be omitted |
|
||||
| `password` | | The user's password. Can be omitted |
|
||||
| `host` | | The host to connect to |
|
||||
| `port` | | The port to bind to |
|
||||
+5
@@ -0,0 +1,5 @@
|
||||
[
|
||||
{
|
||||
"dropUser": "deminem"
|
||||
}
|
||||
]
|
||||
+12
@@ -0,0 +1,12 @@
|
||||
[
|
||||
{
|
||||
"createUser": "deminem",
|
||||
"pwd": "gogo",
|
||||
"roles": [
|
||||
{
|
||||
"role": "readWrite",
|
||||
"db": "testMigration"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
+10
@@ -0,0 +1,10 @@
|
||||
[
|
||||
{
|
||||
"dropIndexes": "mycollection",
|
||||
"index": "username_sort_by_asc_created"
|
||||
},
|
||||
{
|
||||
"dropIndexes": "mycollection",
|
||||
"index": "unique_email"
|
||||
}
|
||||
]
|
||||
+21
@@ -0,0 +1,21 @@
|
||||
[{
|
||||
"createIndexes": "mycollection",
|
||||
"indexes": [
|
||||
{
|
||||
"key": {
|
||||
"username": 1,
|
||||
"created": -1
|
||||
},
|
||||
"name": "username_sort_by_asc_created",
|
||||
"background": true
|
||||
},
|
||||
{
|
||||
"key": {
|
||||
"email": 1
|
||||
},
|
||||
"name": "unique_email",
|
||||
"unique": true,
|
||||
"background": true
|
||||
}
|
||||
]
|
||||
}]
|
||||
+16
@@ -0,0 +1,16 @@
|
||||
[
|
||||
{
|
||||
"update": "users",
|
||||
"updates": [
|
||||
{
|
||||
"q": {},
|
||||
"u": {
|
||||
"$unset": {
|
||||
"status": ""
|
||||
}
|
||||
},
|
||||
"multi": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
+16
@@ -0,0 +1,16 @@
|
||||
[
|
||||
{
|
||||
"update": "users",
|
||||
"updates": [
|
||||
{
|
||||
"q": {},
|
||||
"u": {
|
||||
"$set": {
|
||||
"status": "active"
|
||||
}
|
||||
},
|
||||
"multi": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
+14
@@ -0,0 +1,14 @@
|
||||
[
|
||||
{
|
||||
"update": "users",
|
||||
"updates": [
|
||||
{
|
||||
"q": {},
|
||||
"u": {
|
||||
"fullname": ""
|
||||
},
|
||||
"multi": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
[
|
||||
{
|
||||
"aggregate": "users",
|
||||
"pipeline": [
|
||||
{
|
||||
"$project": {
|
||||
"_id": 1,
|
||||
"firstname": 1,
|
||||
"lastname": 1,
|
||||
"username": 1,
|
||||
"password": 1,
|
||||
"email": 1,
|
||||
"active": 1,
|
||||
"fullname": { "$concat": ["$firstname", " ", "$lastname"] }
|
||||
}
|
||||
},
|
||||
{
|
||||
"$out": "users"
|
||||
}
|
||||
],
|
||||
"cursor": {}
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,404 @@
|
||||
package mongodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
// init registers the Mongo driver under both supported URL schemes.
func init() {
	db := Mongo{}
	database.Register("mongodb", &db)
	database.Register("mongodb+srv", &db)
}
|
||||
|
||||
// DefaultMigrationsCollection is used when Config.MigrationsCollection is empty.
var DefaultMigrationsCollection = "schema_migrations"

const DefaultLockingCollection = "migrate_advisory_lock" // the collection to use for advisory locking by default.
const lockKeyUniqueValue = 0                             // the unique value to lock on. If multiple clients try to insert the same key, it will fail (locked).
const DefaultLockTimeout = 15                            // the default maximum time (seconds) to wait for a lock to be released.
const DefaultLockTimeoutInterval = 10                    // the default maximum interval (seconds) for the locking timeout.
const DefaultAdvisoryLockingFlag = true                  // the default value for the advisory locking feature flag. Default is true.
const LockIndexName = "lock_unique_key"                  // the name of the index which adds unique constraint to the locking_key field.
const contextWaitTimeout = 5 * time.Second               // how long to wait for the request to mongo to block/wait for.
|
||||
|
||||
var (
	// ErrNoDatabaseName is returned when the DSN carries no database name.
	ErrNoDatabaseName = fmt.Errorf("no database name")
	// ErrNilConfig is returned by WithInstance when config is nil.
	ErrNilConfig = fmt.Errorf("no config")
	// ErrLockTimeoutConfigConflict is returned when both the correct and
	// the legacy-typo interval parameters are present in the DSN.
	ErrLockTimeoutConfigConflict = fmt.Errorf("both x-advisory-lock-timeout-interval and x-advisory-lock-timout-interval were specified")
)
|
||||
|
||||
// Mongo implements database.Driver on top of the official mongo client.
type Mongo struct {
	client   *mongo.Client
	db       *mongo.Database
	config   *Config
	isLocked atomic.Bool // in-process lock flag, separate from the advisory-lock collection
}

// Locking groups the advisory-locking settings; see the x-advisory-* URL
// parameters parsed in Open.
type Locking struct {
	CollectionName string
	Timeout        int
	Enabled        bool
	Interval       int
}

// Config holds the driver settings accepted by WithInstance.
type Config struct {
	DatabaseName         string
	MigrationsCollection string
	TransactionMode      bool
	Locking              Locking
}

// versionInfo is the document stored in the migrations collection.
type versionInfo struct {
	Version int  `bson:"version"`
	Dirty   bool `bson:"dirty"`
}

// lockObj is the document inserted into the advisory-lock collection;
// a unique index on locking_key makes concurrent inserts fail (locked).
type lockObj struct {
	Key       int       `bson:"locking_key"`
	Pid       int       `bson:"pid"`
	Hostname  string    `bson:"hostname"`
	CreatedAt time.Time `bson:"created_at"`
}

// findFilter selects a lockObj by its locking key.
type findFilter struct {
	Key int `bson:"locking_key"`
}
|
||||
|
||||
// WithInstance wraps an existing mongo client in a migrate
// database.Driver, applying defaults for every unset config field and
// creating the lock collection/index when advisory locking is enabled.
func WithInstance(instance *mongo.Client, config *Config) (database.Driver, error) {
	if config == nil {
		return nil, ErrNilConfig
	}
	if len(config.DatabaseName) == 0 {
		return nil, ErrNoDatabaseName
	}
	if len(config.MigrationsCollection) == 0 {
		config.MigrationsCollection = DefaultMigrationsCollection
	}
	if len(config.Locking.CollectionName) == 0 {
		config.Locking.CollectionName = DefaultLockingCollection
	}
	if config.Locking.Timeout <= 0 {
		config.Locking.Timeout = DefaultLockTimeout
	}
	if config.Locking.Interval <= 0 {
		config.Locking.Interval = DefaultLockTimeoutInterval
	}

	mc := &Mongo{
		client: instance,
		db:     instance.Database(config.DatabaseName),
		config: config,
	}

	if mc.config.Locking.Enabled {
		if err := mc.ensureLockTable(); err != nil {
			return nil, err
		}
	}
	if err := mc.ensureVersionTable(); err != nil {
		return nil, err
	}

	return mc, nil
}
|
||||
|
||||
func (m *Mongo) Open(dsn string) (database.Driver, error) {
|
||||
// connstring is experimental package, but it used for parse connection string in mongo.Connect function
|
||||
uri, err := connstring.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(uri.Database) == 0 {
|
||||
return nil, ErrNoDatabaseName
|
||||
}
|
||||
unknown := url.Values(uri.UnknownOptions)
|
||||
|
||||
migrationsCollection := unknown.Get("x-migrations-collection")
|
||||
lockCollection := unknown.Get("x-advisory-lock-collection")
|
||||
transactionMode, err := parseBoolean(unknown.Get("x-transaction-mode"), false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
advisoryLockingFlag, err := parseBoolean(unknown.Get("x-advisory-locking"), DefaultAdvisoryLockingFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockingTimout, err := parseInt(unknown.Get("x-advisory-lock-timeout"), DefaultLockTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lockTimeoutIntervalValue := unknown.Get("x-advisory-lock-timeout-interval")
|
||||
// The initial release had a typo for this argument but for backwards compatibility sake, we will keep supporting it
|
||||
// and we will error out if both values are set.
|
||||
lockTimeoutIntervalValueFromTypo := unknown.Get("x-advisory-lock-timout-interval")
|
||||
|
||||
lockTimeout := lockTimeoutIntervalValue
|
||||
|
||||
if lockTimeoutIntervalValue != "" && lockTimeoutIntervalValueFromTypo != "" {
|
||||
return nil, ErrLockTimeoutConfigConflict
|
||||
} else if lockTimeoutIntervalValueFromTypo != "" {
|
||||
lockTimeout = lockTimeoutIntervalValueFromTypo
|
||||
}
|
||||
|
||||
maxLockCheckInterval, err := parseInt(lockTimeout, DefaultLockTimeoutInterval)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(dsn))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = client.Ping(context.TODO(), nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mc, err := WithInstance(client, &Config{
|
||||
DatabaseName: uri.Database,
|
||||
MigrationsCollection: migrationsCollection,
|
||||
TransactionMode: transactionMode,
|
||||
Locking: Locking{
|
||||
CollectionName: lockCollection,
|
||||
Timeout: lockingTimout,
|
||||
Enabled: advisoryLockingFlag,
|
||||
Interval: maxLockCheckInterval,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mc, nil
|
||||
}
|
||||
|
||||
// parseBoolean converts a URL query parameter into a bool.
// An absent (empty) urlParam yields defaultValue; a malformed value
// yields an error.
func parseBoolean(urlParam string, defaultValue bool) (bool, error) {
	// Absent parameter: fall back to the caller-supplied default.
	if urlParam == "" {
		return defaultValue, nil
	}
	result, err := strconv.ParseBool(urlParam)
	if err != nil {
		return false, err
	}
	return result, nil
}
|
||||
|
||||
// parseInt converts a URL query parameter into an int.
// An absent (empty) urlParam yields defaultValue; a malformed value
// yields an error (with -1 as the placeholder result).
func parseInt(urlParam string, defaultValue int) (int, error) {
	// Absent parameter: fall back to the caller-supplied default.
	if urlParam == "" {
		return defaultValue, nil
	}
	result, err := strconv.Atoi(urlParam)
	if err != nil {
		return -1, err
	}
	return result, nil
}
|
||||
func (m *Mongo) SetVersion(version int, dirty bool) error {
|
||||
migrationsCollection := m.db.Collection(m.config.MigrationsCollection)
|
||||
if err := migrationsCollection.Drop(context.TODO()); err != nil {
|
||||
return &database.Error{OrigErr: err, Err: "drop migrations collection failed"}
|
||||
}
|
||||
_, err := migrationsCollection.InsertOne(context.TODO(), bson.M{"version": version, "dirty": dirty})
|
||||
if err != nil {
|
||||
return &database.Error{OrigErr: err, Err: "save version failed"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mongo) Version() (version int, dirty bool, err error) {
|
||||
var versionInfo versionInfo
|
||||
err = m.db.Collection(m.config.MigrationsCollection).FindOne(context.TODO(), bson.M{}).Decode(&versionInfo)
|
||||
switch {
|
||||
case err == mongo.ErrNoDocuments:
|
||||
return database.NilVersion, false, nil
|
||||
case err != nil:
|
||||
return 0, false, &database.Error{OrigErr: err, Err: "failed to get migration version"}
|
||||
default:
|
||||
return versionInfo.Version, versionInfo.Dirty, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Mongo) Run(migration io.Reader) error {
|
||||
migr, err := io.ReadAll(migration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var cmds []bson.D
|
||||
err = bson.UnmarshalExtJSON(migr, true, &cmds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unmarshaling json error: %s", err)
|
||||
}
|
||||
if m.config.TransactionMode {
|
||||
if err := m.executeCommandsWithTransaction(context.TODO(), cmds); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := m.executeCommands(context.TODO(), cmds); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mongo) executeCommandsWithTransaction(ctx context.Context, cmds []bson.D) error {
|
||||
err := m.db.Client().UseSession(ctx, func(sessionContext mongo.SessionContext) error {
|
||||
if err := sessionContext.StartTransaction(); err != nil {
|
||||
return &database.Error{OrigErr: err, Err: "failed to start transaction"}
|
||||
}
|
||||
if err := m.executeCommands(sessionContext, cmds); err != nil {
|
||||
// When command execution is failed, it's aborting transaction
|
||||
// If you tried to call abortTransaction, it`s return error that transaction already aborted
|
||||
return err
|
||||
}
|
||||
if err := sessionContext.CommitTransaction(sessionContext); err != nil {
|
||||
return &database.Error{OrigErr: err, Err: "failed to commit transaction"}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mongo) executeCommands(ctx context.Context, cmds []bson.D) error {
|
||||
for _, cmd := range cmds {
|
||||
err := m.db.RunCommand(ctx, cmd).Err()
|
||||
if err != nil {
|
||||
return &database.Error{OrigErr: err, Err: fmt.Sprintf("failed to execute command:%v", cmd)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close disconnects the underlying MongoDB client.
func (m *Mongo) Close() error {
	return m.client.Disconnect(context.TODO())
}
|
||||
|
||||
// Drop deletes the entire database this driver is bound to, including the
// migrations and lock collections.
func (m *Mongo) Drop() error {
	return m.db.Drop(context.TODO())
}
|
||||
|
||||
func (m *Mongo) ensureLockTable() error {
|
||||
indexes := m.db.Collection(m.config.Locking.CollectionName).Indexes()
|
||||
|
||||
indexOptions := options.Index().SetUnique(true).SetName(LockIndexName)
|
||||
_, err := indexes.CreateOne(context.TODO(), mongo.IndexModel{
|
||||
Options: indexOptions,
|
||||
Keys: findFilter{Key: -1},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
|
||||
// Note that this function locks the database, which deviates from the usual
|
||||
// convention of "caller locks" in the MongoDb type.
|
||||
func (m *Mongo) ensureVersionTable() (err error) {
|
||||
if err = m.Lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if e := m.Unlock(); e != nil {
|
||||
if err == nil {
|
||||
err = e
|
||||
} else {
|
||||
err = multierror.Append(err, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, _, err = m.Version(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Utilizes advisory locking on the config.LockingCollection collection
// This uses a unique index on the `locking_key` field.
// The CAS wrapper flips m.isLocked false->true and restores it (returning
// database.ErrLocked) when the inner function fails.
func (m *Mongo) Lock() error {
	return database.CasRestoreOnErr(&m.isLocked, false, true, database.ErrLocked, func() error {
		// Locking disabled: treat the lock as immediately acquired.
		if !m.config.Locking.Enabled {
			return nil
		}

		pid := os.Getpid()
		hostname, err := os.Hostname()
		if err != nil {
			hostname = fmt.Sprintf("Could not determine hostname. Error: %s", err.Error())
		}

		// The lock document records who holds the lock; inserting it can
		// succeed only while no other document with the same unique key
		// exists (enforced by the unique index from ensureLockTable).
		newLockObj := lockObj{
			Key:       lockKeyUniqueValue,
			Pid:       pid,
			Hostname:  hostname,
			CreatedAt: time.Now(),
		}
		operation := func() error {
			timeout, cancelFunc := context.WithTimeout(context.Background(), contextWaitTimeout)
			_, err := m.db.Collection(m.config.Locking.CollectionName).InsertOne(timeout, newLockObj)
			defer cancelFunc()
			return err
		}
		// Retry the insert with exponential backoff until the configured
		// lock timeout elapses; each retry waits at most Locking.Interval.
		exponentialBackOff := backoff.NewExponentialBackOff()
		duration := time.Duration(m.config.Locking.Timeout) * time.Second
		exponentialBackOff.MaxElapsedTime = duration
		exponentialBackOff.MaxInterval = time.Duration(m.config.Locking.Interval) * time.Second

		err = backoff.Retry(operation, exponentialBackOff)
		if err != nil {
			// All retries exhausted: another holder kept the lock.
			return database.ErrLocked
		}

		return nil
	})
}
|
||||
|
||||
func (m *Mongo) Unlock() error {
|
||||
return database.CasRestoreOnErr(&m.isLocked, true, false, database.ErrNotLocked, func() error {
|
||||
if !m.config.Locking.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
filter := findFilter{
|
||||
Key: lockKeyUniqueValue,
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), contextWaitTimeout)
|
||||
_, err := m.db.Collection(m.config.Locking.CollectionName).DeleteMany(ctx, filter)
|
||||
defer cancel()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
+430
@@ -0,0 +1,430 @@
|
||||
package mongodb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"log"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/dhui/dktest"
|
||||
"go.mongodb.org/mongo-driver/bson"
|
||||
"go.mongodb.org/mongo-driver/mongo"
|
||||
"go.mongodb.org/mongo-driver/mongo/options"
|
||||
)
|
||||
|
||||
import (
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
var (
|
||||
opts = dktest.Options{PortRequired: true, ReadyFunc: isReady}
|
||||
// Supported versions: https://www.mongodb.com/support-policy
|
||||
specs = []dktesting.ContainerSpec{
|
||||
{ImageName: "mongo:3.4", Options: opts},
|
||||
{ImageName: "mongo:3.6", Options: opts},
|
||||
{ImageName: "mongo:4.0", Options: opts},
|
||||
{ImageName: "mongo:4.2", Options: opts},
|
||||
}
|
||||
)
|
||||
|
||||
// mongoConnectionString builds a test connection URI for the given host/port.
// connect=direct bypasses server discovery, which avoids errors when talking
// to a mongo replica-set member running inside a docker container.
func mongoConnectionString(host, port string) string {
	const format = "mongodb://%s:%s/testMigration?connect=direct"
	return fmt.Sprintf(format, host, port)
}
|
||||
|
||||
// isReady reports whether the mongo container accepts connections; it is
// used as the dktest ReadyFunc while the container boots.
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
	ip, port, err := c.FirstPort()
	if err != nil {
		return false
	}

	client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongoConnectionString(ip, port)))
	if err != nil {
		return false
	}
	defer func() {
		if err := client.Disconnect(ctx); err != nil {
			log.Println("close error:", err)
		}
	}()

	if err = client.Ping(ctx, nil); err != nil {
		switch err {
		case io.EOF:
			// Connection dropped mid-handshake: server not up yet.
			return false
		default:
			// Unexpected ping failure: log it for debugging, then report
			// not-ready so dktest keeps polling.
			log.Println(err)
		}
		return false
	}
	return true
}
|
||||
|
||||
// Test runs the driver sub-tests against every supported mongo image and
// removes the pulled container images afterwards.
func Test(t *testing.T) {
	t.Run("test", test)
	t.Run("testMigrate", testMigrate)
	t.Run("testWithAuth", testWithAuth)
	t.Run("testLockWorks", testLockWorks)

	t.Cleanup(func() {
		for _, spec := range specs {
			t.Log("Cleaning up ", spec.ImageName)
			if err := spec.Cleanup(); err != nil {
				t.Error("Error removing ", spec.ImageName, "error:", err)
			}
		}
	})
}
|
||||
|
||||
func test(t *testing.T) {
|
||||
dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
|
||||
ip, port, err := c.FirstPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr := mongoConnectionString(ip, port)
|
||||
p := &Mongo{}
|
||||
d, err := p.Open(addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
dt.TestNilVersion(t, d)
|
||||
dt.TestLockAndUnlock(t, d)
|
||||
dt.TestRun(t, d, bytes.NewReader([]byte(`[{"insert":"hello","documents":[{"wild":"world"}]}]`)))
|
||||
dt.TestSetVersion(t, d)
|
||||
dt.TestDrop(t, d)
|
||||
})
|
||||
}
|
||||
|
||||
func testMigrate(t *testing.T) {
|
||||
dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
|
||||
ip, port, err := c.FirstPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr := mongoConnectionString(ip, port)
|
||||
p := &Mongo{}
|
||||
d, err := p.Open(addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "", d)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dt.TestMigrate(t, m)
|
||||
})
|
||||
}
|
||||
|
||||
func testWithAuth(t *testing.T) {
|
||||
dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
|
||||
ip, port, err := c.FirstPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr := mongoConnectionString(ip, port)
|
||||
p := &Mongo{}
|
||||
d, err := p.Open(addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
createUserCMD := []byte(`[{"createUser":"deminem","pwd":"gogo","roles":[{"role":"readWrite","db":"testMigration"}]}]`)
|
||||
err = d.Run(bytes.NewReader(createUserCMD))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testcases := []struct {
|
||||
name string
|
||||
connectUri string
|
||||
isErrorExpected bool
|
||||
}{
|
||||
{"right auth data", "mongodb://deminem:gogo@%s:%v/testMigration", false},
|
||||
{"wrong auth data", "mongodb://wrong:auth@%s:%v/testMigration", true},
|
||||
}
|
||||
|
||||
for _, tcase := range testcases {
|
||||
t.Run(tcase.name, func(t *testing.T) {
|
||||
mc := &Mongo{}
|
||||
d, err := mc.Open(fmt.Sprintf(tcase.connectUri, ip, port))
|
||||
if err == nil {
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
switch {
|
||||
case tcase.isErrorExpected && err == nil:
|
||||
t.Fatalf("no error when expected")
|
||||
case !tcase.isErrorExpected && err != nil:
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func testLockWorks(t *testing.T) {
|
||||
dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
|
||||
ip, port, err := c.FirstPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr := mongoConnectionString(ip, port)
|
||||
p := &Mongo{}
|
||||
d, err := p.Open(addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
dt.TestRun(t, d, bytes.NewReader([]byte(`[{"insert":"hello","documents":[{"wild":"world"}]}]`)))
|
||||
|
||||
mc := d.(*Mongo)
|
||||
|
||||
err = mc.Lock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = mc.Unlock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = mc.Lock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = mc.Unlock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// enable locking,
|
||||
//try to hit a lock conflict
|
||||
mc.config.Locking.Enabled = true
|
||||
mc.config.Locking.Timeout = 1
|
||||
err = mc.Lock()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = mc.Lock()
|
||||
if err == nil {
|
||||
t.Fatal("should have failed, mongo should be locked already")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestTransaction(t *testing.T) {
|
||||
transactionSpecs := []dktesting.ContainerSpec{
|
||||
{ImageName: "mongo:4", Options: dktest.Options{PortRequired: true, ReadyFunc: isReady,
|
||||
Cmd: []string{"mongod", "--bind_ip_all", "--replSet", "rs0"}}},
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
for _, spec := range transactionSpecs {
|
||||
t.Log("Cleaning up ", spec.ImageName)
|
||||
if err := spec.Cleanup(); err != nil {
|
||||
t.Error("Error removing ", spec.ImageName, "error:", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
dktesting.ParallelTest(t, transactionSpecs, func(t *testing.T, c dktest.ContainerInfo) {
|
||||
ip, port, err := c.FirstPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongoConnectionString(ip, port)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = client.Ping(context.TODO(), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//rs.initiate()
|
||||
err = client.Database("admin").RunCommand(context.TODO(), bson.D{bson.E{Key: "replSetInitiate", Value: bson.D{}}}).Err()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = waitForReplicaInit(client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d, err := WithInstance(client, &Config{
|
||||
DatabaseName: "testMigration",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
//We have to create collection
|
||||
//transactions don't support operations with creating new dbs, collections
|
||||
//Unique index need for checking transaction aborting
|
||||
insertCMD := []byte(`[
|
||||
{"create":"hello"},
|
||||
{"createIndexes": "hello",
|
||||
"indexes": [{
|
||||
"key": {
|
||||
"wild": 1
|
||||
},
|
||||
"name": "unique_wild",
|
||||
"unique": true,
|
||||
"background": true
|
||||
}]
|
||||
}]`)
|
||||
err = d.Run(bytes.NewReader(insertCMD))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testcases := []struct {
|
||||
name string
|
||||
cmds []byte
|
||||
documentsCount int64
|
||||
isErrorExpected bool
|
||||
}{
|
||||
{
|
||||
name: "success transaction",
|
||||
cmds: []byte(`[{"insert":"hello","documents":[
|
||||
{"wild":"world"},
|
||||
{"wild":"west"},
|
||||
{"wild":"natural"}
|
||||
]
|
||||
}]`),
|
||||
documentsCount: 3,
|
||||
isErrorExpected: false,
|
||||
},
|
||||
{
|
||||
name: "failure transaction",
|
||||
//transaction have to be failure - duplicate unique key wild:west
|
||||
//none of the documents should be added
|
||||
cmds: []byte(`[{"insert":"hello","documents":[{"wild":"flower"}]},
|
||||
{"insert":"hello","documents":[
|
||||
{"wild":"cat"},
|
||||
{"wild":"west"}
|
||||
]
|
||||
}]`),
|
||||
documentsCount: 3,
|
||||
isErrorExpected: true,
|
||||
},
|
||||
}
|
||||
for _, tcase := range testcases {
|
||||
t.Run(tcase.name, func(t *testing.T) {
|
||||
client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(mongoConnectionString(ip, port)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = client.Ping(context.TODO(), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
d, err := WithInstance(client, &Config{
|
||||
DatabaseName: "testMigration",
|
||||
TransactionMode: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := d.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
runErr := d.Run(bytes.NewReader(tcase.cmds))
|
||||
if runErr != nil {
|
||||
if !tcase.isErrorExpected {
|
||||
t.Fatal(runErr)
|
||||
}
|
||||
}
|
||||
documentsCount, err := client.Database("testMigration").Collection("hello").CountDocuments(context.TODO(), bson.M{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if tcase.documentsCount != documentsCount {
|
||||
t.Fatalf("expected %d and actual %d documents count not equal. run migration error:%s", tcase.documentsCount, documentsCount, runErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// isMaster mirrors the subset of the MongoDB "isMaster" command reply needed
// to detect when the node has become primary.
type isMaster struct {
	IsMaster bool `bson:"ismaster"`
}

// waitForReplicaInit blocks until the single-node replica set has elected a
// primary, polling once per second. The overall timeout defaults to 30s and
// can be overridden (in seconds) via MIGRATE_TEST_MONGO_REPLICA_SET_INIT_TIMEOUT.
func waitForReplicaInit(client *mongo.Client) error {
	ticker := time.NewTicker(time.Second * 1)
	defer ticker.Stop()
	timeout, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_MONGO_REPLICA_SET_INIT_TIMEOUT"))
	if err != nil {
		// Unset or malformed env var: fall back to 30 seconds.
		timeout = 30
	}
	timeoutTimer := time.NewTimer(time.Duration(timeout) * time.Second)
	defer timeoutTimer.Stop()
	for {
		select {
		case <-ticker.C:
			var status isMaster
			//Check that node is primary because
			//during replica set initialization, the first node first becomes a secondary and then becomes the primary
			//should consider that initialization is completed only after the node has become the primary
			result := client.Database("admin").RunCommand(context.TODO(), bson.D{bson.E{Key: "isMaster", Value: 1}})
			r, err := result.DecodeBytes()
			if err != nil {
				return err
			}
			err = bson.Unmarshal(r, &status)
			if err != nil {
				return err
			}
			if status.IsMaster {
				return nil
			}
		case <-timeoutTimer.C:
			return fmt.Errorf("replica init timeout")
		}
	}

}
|
||||
@@ -0,0 +1,46 @@
|
||||
// Package multistmt provides methods for parsing multi-statement database migrations
|
||||
package multistmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// StartBufSize is the default starting size of the buffer used to scan and parse multi-statement migrations
|
||||
var StartBufSize = 4096
|
||||
|
||||
// Handler handles a single migration parsed from a multi-statement migration.
|
||||
// It's given the single migration to handle and returns whether or not further statements
|
||||
// from the multi-statement migration should be parsed and handled.
|
||||
type Handler func(migration []byte) bool
|
||||
|
||||
func splitWithDelimiter(delimiter []byte) func(d []byte, atEOF bool) (int, []byte, error) {
|
||||
return func(d []byte, atEOF bool) (int, []byte, error) {
|
||||
// SplitFunc inspired by bufio.ScanLines() implementation
|
||||
if atEOF {
|
||||
if len(d) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
return len(d), d, nil
|
||||
}
|
||||
if i := bytes.Index(d, delimiter); i >= 0 {
|
||||
return i + len(delimiter), d[:i+len(delimiter)], nil
|
||||
}
|
||||
return 0, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses the given multi-statement migration
|
||||
func Parse(reader io.Reader, delimiter []byte, maxMigrationSize int, h Handler) error {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
scanner.Buffer(make([]byte, 0, StartBufSize), maxMigrationSize)
|
||||
scanner.Split(splitWithDelimiter(delimiter))
|
||||
for scanner.Scan() {
|
||||
cont := h(scanner.Bytes())
|
||||
if !cont {
|
||||
break
|
||||
}
|
||||
}
|
||||
return scanner.Err()
|
||||
}
|
||||
+57
@@ -0,0 +1,57 @@
|
||||
package multistmt_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4/database/multistmt"
|
||||
)
|
||||
|
||||
const maxMigrationSize = 1024
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
multiStmt string
|
||||
delimiter string
|
||||
expected []string
|
||||
expectedErr error
|
||||
}{
|
||||
{name: "single statement, no delimiter", multiStmt: "single statement, no delimiter", delimiter: ";",
|
||||
expected: []string{"single statement, no delimiter"}, expectedErr: nil},
|
||||
{name: "single statement, one delimiter", multiStmt: "single statement, one delimiter;", delimiter: ";",
|
||||
expected: []string{"single statement, one delimiter;"}, expectedErr: nil},
|
||||
{name: "two statements, no trailing delimiter", multiStmt: "statement one; statement two", delimiter: ";",
|
||||
expected: []string{"statement one;", " statement two"}, expectedErr: nil},
|
||||
{name: "two statements, with trailing delimiter", multiStmt: "statement one; statement two;", delimiter: ";",
|
||||
expected: []string{"statement one;", " statement two;"}, expectedErr: nil},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
stmts := make([]string, 0, len(tc.expected))
|
||||
err := multistmt.Parse(strings.NewReader(tc.multiStmt), []byte(tc.delimiter), maxMigrationSize, func(b []byte) bool {
|
||||
stmts = append(stmts, string(b))
|
||||
return true
|
||||
})
|
||||
assert.Equal(t, tc.expectedErr, err)
|
||||
assert.Equal(t, tc.expected, stmts)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseDiscontinue(t *testing.T) {
|
||||
multiStmt := "statement one; statement two"
|
||||
delimiter := ";"
|
||||
expected := []string{"statement one;"}
|
||||
|
||||
stmts := make([]string, 0, len(expected))
|
||||
err := multistmt.Parse(strings.NewReader(multiStmt), []byte(delimiter), maxMigrationSize, func(b []byte) bool {
|
||||
stmts = append(stmts, string(b))
|
||||
return false
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, expected, stmts)
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
# MySQL
|
||||
|
||||
`mysql://user:password@tcp(host:port)/dbname?query`
|
||||
|
||||
| URL Query | WithInstance Config | Description |
|
||||
|------------|---------------------|-------------|
|
||||
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
|
||||
| `x-no-lock` | `NoLock` | Set to `true` to skip `GET_LOCK`/`RELEASE_LOCK` statements. Useful for [multi-master MySQL flavors](https://www.percona.com/doc/percona-xtradb-cluster/LATEST/features/pxc-strict-mode.html#explicit-table-locking). Only run migrations from one host when this is enabled. |
|
||||
| `x-statement-timeout` | `StatementTimeout` | Abort any statement that takes more than the specified number of milliseconds, functionally similar to [Server-side SELECT statement timeouts](https://dev.mysql.com/blog-archive/server-side-select-statement-timeouts/) but enforced by the client. Available for all versions of MySQL, not just >=5.7. |
|
||||
| `dbname` | `DatabaseName` | The name of the database to connect to |
|
||||
| `user` | | The user to sign in as |
|
||||
| `password` | | The user's password |
|
||||
| `host` | | The host to connect to. |
|
||||
| `port` | | The port to bind to. |
|
||||
| `tls` | | TLS / SSL encrypted connection parameter; see [go-sql-driver](https://github.com/go-sql-driver/mysql#tls). Use any name (e.g. `migrate`) if you want to use a custom TLS config (`x-tls-` queries). |
|
||||
| `x-tls-ca` | | The location of the CA (certificate authority) file. |
|
||||
| `x-tls-cert` | | The location of the client certificate file. Must be used with `x-tls-key`. |
|
||||
| `x-tls-key` | | The location of the private key file. Must be used with `x-tls-cert`. |
|
||||
| `x-tls-insecure-skip-verify` | | Whether to skip server certificate verification (true\|false). Insecure — use only for testing. |
|
||||
|
||||
## Use with existing client
|
||||
|
||||
If you use the MySQL driver with existing database client, you must create the client with parameter `multiStatements=true`:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database/mysql"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true")
|
||||
driver, _ := mysql.WithInstance(db, &mysql.Config{})
|
||||
m, _ := migrate.NewWithDatabaseInstance(
|
||||
"file:///migrations",
|
||||
"mysql",
|
||||
driver,
|
||||
)
|
||||
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
## Upgrading from v1
|
||||
|
||||
1. Write down the current migration version from schema_migrations
2. `DROP TABLE schema_migrations`
3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration.
4. Download and install the latest migrate version.
5. Force the current migration version with `migrate force <current_version>`.
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP TABLE IF EXISTS test;
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
CREATE TABLE IF NOT EXISTS test (
|
||||
firstname VARCHAR(16)
|
||||
);
|
||||
@@ -0,0 +1,514 @@
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
nurl "net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
)
|
||||
|
||||
var _ database.Driver = (*Mysql)(nil) // explicit compile time type check
|
||||
|
||||
func init() {
|
||||
database.Register("mysql", &Mysql{})
|
||||
}
|
||||
|
||||
var DefaultMigrationsTable = "schema_migrations"
|
||||
|
||||
var (
|
||||
ErrDatabaseDirty = fmt.Errorf("database is dirty")
|
||||
ErrNilConfig = fmt.Errorf("no config")
|
||||
ErrNoDatabaseName = fmt.Errorf("no database name")
|
||||
ErrAppendPEM = fmt.Errorf("failed to append PEM")
|
||||
ErrTLSCertKeyConfig = fmt.Errorf("To use TLS client authentication, both x-tls-cert and x-tls-key must not be empty")
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
MigrationsTable string
|
||||
DatabaseName string
|
||||
NoLock bool
|
||||
StatementTimeout time.Duration
|
||||
}
|
||||
|
||||
type Mysql struct {
|
||||
// mysql RELEASE_LOCK must be called from the same conn, so
|
||||
// just do everything over a single conn anyway.
|
||||
conn *sql.Conn
|
||||
db *sql.DB
|
||||
isLocked atomic.Bool
|
||||
|
||||
config *Config
|
||||
}
|
||||
|
||||
// connection instance must have `multiStatements` set to true
|
||||
func WithConnection(ctx context.Context, conn *sql.Conn, config *Config) (*Mysql, error) {
|
||||
if config == nil {
|
||||
return nil, ErrNilConfig
|
||||
}
|
||||
|
||||
if err := conn.PingContext(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mx := &Mysql{
|
||||
conn: conn,
|
||||
db: nil,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if config.DatabaseName == "" {
|
||||
query := `SELECT DATABASE()`
|
||||
var databaseName sql.NullString
|
||||
if err := conn.QueryRowContext(ctx, query).Scan(&databaseName); err != nil {
|
||||
return nil, &database.Error{OrigErr: err, Query: []byte(query)}
|
||||
}
|
||||
|
||||
if len(databaseName.String) == 0 {
|
||||
return nil, ErrNoDatabaseName
|
||||
}
|
||||
|
||||
config.DatabaseName = databaseName.String
|
||||
}
|
||||
|
||||
if len(config.MigrationsTable) == 0 {
|
||||
config.MigrationsTable = DefaultMigrationsTable
|
||||
}
|
||||
|
||||
if err := mx.ensureVersionTable(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mx, nil
|
||||
}
|
||||
|
||||
// instance must have `multiStatements` set to true
|
||||
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
if err := instance.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn, err := instance.Conn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mx, err := WithConnection(ctx, conn, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mx.db = instance
|
||||
|
||||
return mx, nil
|
||||
}
|
||||
|
||||
// extractCustomQueryParams extracts the custom query params (ones that start with "x-") from
|
||||
// mysql.Config.Params (connection parameters) as to not interfere with connecting to MySQL
|
||||
func extractCustomQueryParams(c *mysql.Config) (map[string]string, error) {
|
||||
if c == nil {
|
||||
return nil, ErrNilConfig
|
||||
}
|
||||
customQueryParams := map[string]string{}
|
||||
|
||||
for k, v := range c.Params {
|
||||
if strings.HasPrefix(k, "x-") {
|
||||
customQueryParams[k] = v
|
||||
delete(c.Params, k)
|
||||
}
|
||||
}
|
||||
return customQueryParams, nil
|
||||
}
|
||||
|
||||
// urlToMySQLConfig converts a migrate-style URL ("mysql://user:pass@tcp(...)/db?...")
// into a go-sql-driver *mysql.Config. If a custom TLS config name is present in
// the query string, it is registered with the driver BEFORE mysql.ParseDSN runs,
// because ParseDSN resolves the registered tls.Config by name.
func urlToMySQLConfig(url string) (*mysql.Config, error) {
	// Need to parse out custom TLS parameters and call
	// mysql.RegisterTLSConfig() before mysql.ParseDSN() is called
	// which consumes the registered tls.Config
	// Fixes: https://github.com/golang-migrate/migrate/issues/411
	//
	// Can't use url.Parse() since it fails to parse MySQL DSNs
	// mysql.ParseDSN() also searches for "?" to find query parameters:
	// https://github.com/go-sql-driver/mysql/blob/46351a8/dsn.go#L344
	if idx := strings.LastIndex(url, "?"); idx > 0 {
		rawParams := url[idx+1:]
		parsedParams, err := nurl.ParseQuery(rawParams)
		if err != nil {
			return nil, err
		}

		ctls := parsedParams.Get("tls")
		if len(ctls) > 0 {
			// "true"/"false"/"skip-verify" are driver built-ins; anything else
			// is treated as a custom TLS config name that we must register.
			if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" {
				rootCertPool := x509.NewCertPool()
				// x-tls-ca points at a PEM CA bundle on disk.
				pem, err := os.ReadFile(parsedParams.Get("x-tls-ca"))
				if err != nil {
					return nil, err
				}

				if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
					return nil, ErrAppendPEM
				}

				// Client cert/key are optional, but must be given as a pair.
				clientCert := make([]tls.Certificate, 0, 1)
				if ccert, ckey := parsedParams.Get("x-tls-cert"), parsedParams.Get("x-tls-key"); ccert != "" || ckey != "" {
					if ccert == "" || ckey == "" {
						return nil, ErrTLSCertKeyConfig
					}
					certs, err := tls.LoadX509KeyPair(ccert, ckey)
					if err != nil {
						return nil, err
					}
					clientCert = append(clientCert, certs)
				}

				insecureSkipVerify := false
				insecureSkipVerifyStr := parsedParams.Get("x-tls-insecure-skip-verify")
				if len(insecureSkipVerifyStr) > 0 {
					x, err := strconv.ParseBool(insecureSkipVerifyStr)
					if err != nil {
						return nil, err
					}
					insecureSkipVerify = x
				}

				// Register under the name carried by the tls= param; ParseDSN
				// below resolves it by this name.
				err = mysql.RegisterTLSConfig(ctls, &tls.Config{
					RootCAs:            rootCertPool,
					Certificates:       clientCert,
					InsecureSkipVerify: insecureSkipVerify,
				})
				if err != nil {
					return nil, err
				}
			}
		}
	}

	config, err := mysql.ParseDSN(strings.TrimPrefix(url, "mysql://"))
	if err != nil {
		return nil, err
	}

	// Migrations are multi-statement scripts; the driver must allow them.
	config.MultiStatements = true

	// Keep backwards compatibility from when we used net/url.Parse() to parse the DSN.
	// net/url.Parse() would automatically unescape it for us.
	// See: https://play.golang.org/p/q9j1io-YICQ
	user, err := nurl.QueryUnescape(config.User)
	if err != nil {
		return nil, err
	}
	config.User = user

	password, err := nurl.QueryUnescape(config.Passwd)
	if err != nil {
		return nil, err
	}
	config.Passwd = password

	return config, nil
}
|
||||
|
||||
func (m *Mysql) Open(url string) (database.Driver, error) {
|
||||
config, err := urlToMySQLConfig(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
customParams, err := extractCustomQueryParams(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
noLockParam, noLock := customParams["x-no-lock"], false
|
||||
if noLockParam != "" {
|
||||
noLock, err = strconv.ParseBool(noLockParam)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse x-no-lock as bool: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
statementTimeoutParam := customParams["x-statement-timeout"]
|
||||
statementTimeout := 0
|
||||
if statementTimeoutParam != "" {
|
||||
statementTimeout, err = strconv.Atoi(statementTimeoutParam)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse x-statement-timeout as float: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
db, err := sql.Open("mysql", config.FormatDSN())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mx, err := WithInstance(db, &Config{
|
||||
DatabaseName: config.DBName,
|
||||
MigrationsTable: customParams["x-migrations-table"],
|
||||
NoLock: noLock,
|
||||
StatementTimeout: time.Duration(statementTimeout) * time.Millisecond,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mx, nil
|
||||
}
|
||||
|
||||
func (m *Mysql) Close() error {
|
||||
connErr := m.conn.Close()
|
||||
var dbErr error
|
||||
if m.db != nil {
|
||||
dbErr = m.db.Close()
|
||||
}
|
||||
|
||||
if connErr != nil || dbErr != nil {
|
||||
return fmt.Errorf("conn: %v, db: %v", connErr, dbErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock acquires a MySQL advisory lock (GET_LOCK) named after the database and
// migrations table, guarding against concurrent migration runs. The CAS helper
// flips m.isLocked first and restores it if the inner func fails. With
// config.NoLock set, only the in-process flag is taken.
func (m *Mysql) Lock() error {
	return database.CasRestoreOnErr(&m.isLocked, false, true, database.ErrLocked, func() error {
		if m.config.NoLock {
			return nil
		}
		// Derive a stable advisory lock name from database + table.
		aid, err := database.GenerateAdvisoryLockId(
			fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable))
		if err != nil {
			return err
		}

		// GET_LOCK waits up to 10 seconds; returns 1 on success, 0 on timeout.
		query := "SELECT GET_LOCK(?, 10)"
		var success bool
		if err := m.conn.QueryRowContext(context.Background(), query, aid).Scan(&success); err != nil {
			return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)}
		}

		if !success {
			return database.ErrLocked
		}

		return nil
	})
}
|
||||
|
||||
// Unlock releases the advisory lock taken by Lock via RELEASE_LOCK. The CAS
// helper flips m.isLocked back to false and restores it if the inner func
// errors. With config.NoLock set, only the in-process flag is released.
func (m *Mysql) Unlock() error {
	return database.CasRestoreOnErr(&m.isLocked, true, false, database.ErrNotLocked, func() error {
		if m.config.NoLock {
			return nil
		}

		// Must match the name generated in Lock().
		aid, err := database.GenerateAdvisoryLockId(
			fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable))
		if err != nil {
			return err
		}

		query := `SELECT RELEASE_LOCK(?)`
		if _, err := m.conn.ExecContext(context.Background(), query, aid); err != nil {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}

		// NOTE: RELEASE_LOCK could return NULL or (or 0 if the code is changed),
		// in which case isLocked should be true until the timeout expires -- synchronizing
		// these states is likely not worth trying to do; reconsider the necessity of isLocked.

		return nil
	})
}
|
||||
|
||||
func (m *Mysql) Run(migration io.Reader) error {
|
||||
migr, err := io.ReadAll(migration)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if m.config.StatementTimeout != 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, m.config.StatementTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
query := string(migr[:])
|
||||
if _, err := m.conn.ExecContext(ctx, query); err != nil {
|
||||
return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetVersion records the current migration version in the migrations table,
// replacing whatever row was there. Delete + insert run in one serializable
// transaction so a concurrent reader never observes an empty table.
func (m *Mysql) SetVersion(version int, dirty bool) error {
	tx, err := m.conn.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelSerializable})
	if err != nil {
		return &database.Error{OrigErr: err, Err: "transaction start failed"}
	}

	// The table holds at most one row; clear it before writing the new version.
	query := "DELETE FROM `" + m.config.MigrationsTable + "`"
	if _, err := tx.ExecContext(context.Background(), query); err != nil {
		if errRollback := tx.Rollback(); errRollback != nil {
			err = multierror.Append(err, errRollback)
		}
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	// Also re-write the schema version for nil dirty versions to prevent
	// empty schema version for failed down migration on the first migration
	// See: https://github.com/golang-migrate/migrate/issues/330
	if version >= 0 || (version == database.NilVersion && dirty) {
		query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)"
		if _, err := tx.ExecContext(context.Background(), query, version, dirty); err != nil {
			if errRollback := tx.Rollback(); errRollback != nil {
				err = multierror.Append(err, errRollback)
			}
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}
	}

	if err := tx.Commit(); err != nil {
		return &database.Error{OrigErr: err, Err: "transaction commit failed"}
	}

	return nil
}
|
||||
|
||||
// Version reads the single row of the migrations table. An empty table (or a
// MySQL error with Number 0) reports database.NilVersion with no error; any
// other failure is wrapped in *database.Error.
func (m *Mysql) Version() (version int, dirty bool, err error) {
	query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1"
	err = m.conn.QueryRowContext(context.Background(), query).Scan(&version, &dirty)
	switch {
	case err == sql.ErrNoRows:
		// No row yet: no migration has been applied.
		return database.NilVersion, false, nil

	case err != nil:
		// NOTE(review): the Number == 0 branch presumably covers a
		// driver-reported "no data" condition — confirm which server error
		// this guards against before changing it.
		if e, ok := err.(*mysql.MySQLError); ok {
			if e.Number == 0 {
				return database.NilVersion, false, nil
			}
		}
		return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}

	default:
		return version, dirty, nil
	}
}
|
||||
|
||||
// Drop deletes every table in the current database. Foreign key checks are
// disabled for the duration so tables can be dropped in arbitrary order, and
// re-enabled by a deferred Exec when the function returns.
func (m *Mysql) Drop() (err error) {
	// select all tables
	query := `SHOW TABLES LIKE '%'`
	tables, err := m.conn.QueryContext(context.Background(), query)
	if err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	// Fold a Close failure into the returned error via the named result.
	defer func() {
		if errClose := tables.Close(); errClose != nil {
			err = multierror.Append(err, errClose)
		}
	}()

	// delete one table after another
	// Collect names first: the result set must be fully consumed (and closed)
	// before issuing further statements on the same connection.
	tableNames := make([]string, 0)
	for tables.Next() {
		var tableName string
		if err := tables.Scan(&tableName); err != nil {
			return err
		}
		if len(tableName) > 0 {
			tableNames = append(tableNames, tableName)
		}
	}
	if err := tables.Err(); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}

	if len(tableNames) > 0 {
		// disable checking foreign key constraints until finished
		query = `SET foreign_key_checks = 0`
		if _, err := m.conn.ExecContext(context.Background(), query); err != nil {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}

		defer func() {
			// enable foreign key checks
			// Best effort: the error is deliberately discarded here.
			_, _ = m.conn.ExecContext(context.Background(), `SET foreign_key_checks = 1`)
		}()

		// delete one by one ...
		for _, t := range tableNames {
			query = "DROP TABLE IF EXISTS `" + t + "`"
			if _, err := m.conn.ExecContext(context.Background(), query); err != nil {
				return &database.Error{OrigErr: err, Query: []byte(query)}
			}
		}
	}

	return nil
}
|
||||
|
||||
// ensureVersionTable checks if versions table exists and, if not, creates it.
// Note that this function locks the database, which deviates from the usual
// convention of "caller locks" in the Mysql type.
func (m *Mysql) ensureVersionTable() (err error) {
	if err = m.Lock(); err != nil {
		return err
	}

	// Merge any Unlock failure into the returned error via the named result.
	defer func() {
		if e := m.Unlock(); e != nil {
			if err == nil {
				err = e
			} else {
				err = multierror.Append(err, e)
			}
		}
	}()

	// check if migration table exists
	// NOTE(review): MigrationsTable is interpolated directly into the SQL;
	// it is operator-supplied config, not end-user input, but confirm it is
	// never sourced from untrusted data.
	var result string
	query := `SHOW TABLES LIKE '` + m.config.MigrationsTable + `'`
	if err := m.conn.QueryRowContext(context.Background(), query).Scan(&result); err != nil {
		if err != sql.ErrNoRows {
			return &database.Error{OrigErr: err, Query: []byte(query)}
		}
	} else {
		// A row came back: the table already exists, nothing to do.
		return nil
	}

	// if not, create the empty migration table
	query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)"
	if _, err := m.conn.ExecContext(context.Background(), query); err != nil {
		return &database.Error{OrigErr: err, Query: []byte(query)}
	}
	return nil
}
|
||||
|
||||
// readBool reports the boolean meaning of input along with whether input is
// one of the exact spellings the MySQL driver recognizes ("1"/"true"/"TRUE"/
// "True" and the corresponding false forms). Any other spelling — including
// "t" or "f" — is invalid.
// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71
func readBool(input string) (value bool, valid bool) {
	for _, s := range []string{"1", "true", "TRUE", "True"} {
		if input == s {
			return true, true
		}
	}
	for _, s := range []string{"0", "false", "FALSE", "False"} {
		if input == s {
			return false, true
		}
	}
	// Anything else is not a recognized bool spelling.
	return false, false
}
|
||||
@@ -0,0 +1,420 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/x509"
|
||||
"database/sql"
|
||||
sqldriver "database/sql/driver"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/dhui/dktest"
|
||||
"github.com/go-sql-driver/mysql"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
dt "github.com/golang-migrate/migrate/v4/database/testing"
|
||||
"github.com/golang-migrate/migrate/v4/dktesting"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// defaultPort is the standard MySQL server port exposed by the test containers.
const defaultPort = 3306

var (
	// opts configures a throwaway MySQL container with a known root password
	// and a "public" database; readiness is gated on isReady.
	opts = dktest.Options{
		Env:          map[string]string{"MYSQL_ROOT_PASSWORD": "root", "MYSQL_DATABASE": "public"},
		PortRequired: true, ReadyFunc: isReady,
	}
	// optsAnsiQuotes is the same container but started with ANSI_QUOTES SQL
	// mode, so double-quoted identifiers are accepted.
	optsAnsiQuotes = dktest.Options{
		Env:          map[string]string{"MYSQL_ROOT_PASSWORD": "root", "MYSQL_DATABASE": "public"},
		PortRequired: true, ReadyFunc: isReady,
		Cmd: []string{"--sql-mode=ANSI_QUOTES"},
	}
	// Supported versions: https://www.mysql.com/support/supportedplatforms/database.html
	specs = []dktesting.ContainerSpec{
		{ImageName: "mysql:5.5", Options: opts},
		{ImageName: "mysql:5.6", Options: opts},
		{ImageName: "mysql:5.7", Options: opts},
		{ImageName: "mysql:8", Options: opts},
	}
	// specsAnsiQuotes mirrors specs with the ANSI_QUOTES variant.
	specsAnsiQuotes = []dktesting.ContainerSpec{
		{ImageName: "mysql:5.5", Options: optsAnsiQuotes},
		{ImageName: "mysql:5.6", Options: optsAnsiQuotes},
		{ImageName: "mysql:5.7", Options: optsAnsiQuotes},
		{ImageName: "mysql:8", Options: optsAnsiQuotes},
	}
)
|
||||
|
||||
// isReady reports whether the MySQL container described by c accepts
// connections: it opens a throwaway *sql.DB against the mapped port and
// pings it with the test context.
func isReady(ctx context.Context, c dktest.ContainerInfo) bool {
	ip, port, err := c.Port(defaultPort)
	if err != nil {
		return false
	}

	db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", ip, port))
	if err != nil {
		return false
	}
	defer func() {
		if err := db.Close(); err != nil {
			log.Println("close error:", err)
		}
	}()
	if err = db.PingContext(ctx); err != nil {
		switch err {
		case sqldriver.ErrBadConn, mysql.ErrInvalidConn:
			// Expected while the server is still starting up.
			return false
		default:
			// Unexpected error: surface it, but still report not-ready.
			fmt.Println(err)
		}
		return false
	}

	return true
}
|
||||
|
||||
// Test runs the generic database.Driver conformance suite against every
// supported MySQL container image, then verifies ensureVersionTable is
// idempotent.
func Test(t *testing.T) {
	// mysql.SetLogger(mysql.Logger(log.New(io.Discard, "", log.Ltime)))

	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
		p := &Mysql{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()
		dt.Test(t, d, []byte("SELECT 1"))

		// check ensureVersionTable
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
		// check again
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
|
||||
// TestMigrate runs the full up/down migration suite from ./examples/migrations
// against every supported MySQL container image, then verifies
// ensureVersionTable is idempotent.
func TestMigrate(t *testing.T) {
	// mysql.SetLogger(mysql.Logger(log.New(io.Discard, "", log.Ltime)))

	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
		p := &Mysql{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "public", d)
		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)

		// check ensureVersionTable
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
		// check again
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
|
||||
// TestMigrateAnsiQuotes is TestMigrate against containers started with
// --sql-mode=ANSI_QUOTES, ensuring the driver's backtick-quoted SQL still
// works when double quotes denote identifiers.
func TestMigrateAnsiQuotes(t *testing.T) {
	// mysql.SetLogger(mysql.Logger(log.New(io.Discard, "", log.Ltime)))

	dktesting.ParallelTest(t, specsAnsiQuotes, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
		p := &Mysql{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		defer func() {
			if err := d.Close(); err != nil {
				t.Error(err)
			}
		}()

		m, err := migrate.NewWithDatabaseInstance("file://./examples/migrations", "public", d)
		if err != nil {
			t.Fatal(err)
		}
		dt.TestMigrate(t, m)

		// check ensureVersionTable
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
		// check again
		if err := d.(*Mysql).ensureVersionTable(); err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
|
||||
// TestLockWorks verifies that the advisory Lock/Unlock pair can be taken and
// released twice in a row on the same connection — RELEASE_LOCK has subtle
// return semantics, so the second cycle guards against a stuck lock.
func TestLockWorks(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
		p := &Mysql{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}
		dt.Test(t, d, []byte("SELECT 1"))

		ms := d.(*Mysql)

		err = ms.Lock()
		if err != nil {
			t.Fatal(err)
		}
		err = ms.Unlock()
		if err != nil {
			t.Fatal(err)
		}

		// make sure the 2nd lock works (RELEASE_LOCK is very finicky)
		err = ms.Lock()
		if err != nil {
			t.Fatal(err)
		}
		err = ms.Unlock()
		if err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
|
||||
// TestNoLockParamValidation checks that Open rejects a non-boolean
// x-no-lock query parameter with strconv.ErrSyntax. No live server is needed:
// the parameter is validated before any connection is attempted.
func TestNoLockParamValidation(t *testing.T) {
	ip := "127.0.0.1"
	port := 3306
	addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
	p := &Mysql{}
	_, err := p.Open(addr + "?x-no-lock=not-a-bool")
	if !errors.Is(err, strconv.ErrSyntax) {
		t.Fatal("Expected syntax error when passing a non-bool as x-no-lock parameter")
	}
}
|
||||
|
||||
// TestNoLockWorks verifies that a driver opened with x-no-lock=true can
// "lock" concurrently with a driver holding the real advisory lock — i.e.
// the no-lock mode genuinely skips GET_LOCK.
func TestNoLockWorks(t *testing.T) {
	dktesting.ParallelTest(t, specs, func(t *testing.T, c dktest.ContainerInfo) {
		ip, port, err := c.Port(defaultPort)
		if err != nil {
			t.Fatal(err)
		}

		addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", ip, port)
		p := &Mysql{}
		d, err := p.Open(addr)
		if err != nil {
			t.Fatal(err)
		}

		lock := d.(*Mysql)

		p = &Mysql{}
		d, err = p.Open(addr + "?x-no-lock=true")
		if err != nil {
			t.Fatal(err)
		}

		noLock := d.(*Mysql)

		// Should be possible to take real lock and no-lock at the same time
		if err = lock.Lock(); err != nil {
			t.Fatal(err)
		}
		if err = noLock.Lock(); err != nil {
			t.Fatal(err)
		}
		if err = lock.Unlock(); err != nil {
			t.Fatal(err)
		}
		if err = noLock.Unlock(); err != nil {
			t.Fatal(err)
		}
	})
}
|
||||
|
||||
// TestExtractCustomQueryParams is a table-driven test for
// extractCustomQueryParams: it checks that "x-" params are pulled into the
// returned map, that they are removed from the config in place, and that a
// nil config yields ErrNilConfig.
func TestExtractCustomQueryParams(t *testing.T) {
	testcases := []struct {
		name                 string
		config               *mysql.Config
		expectedParams       map[string]string // what should remain on config.Params
		expectedCustomParams map[string]string // what should be extracted
		expectedErr          error
	}{
		{name: "nil config", expectedErr: ErrNilConfig},
		{
			name:                 "no params",
			config:               mysql.NewConfig(),
			expectedCustomParams: map[string]string{},
		},
		{
			name:                 "no custom params",
			config:               &mysql.Config{Params: map[string]string{"hello": "world"}},
			expectedParams:       map[string]string{"hello": "world"},
			expectedCustomParams: map[string]string{},
		},
		{
			name: "one param, one custom param",
			config: &mysql.Config{
				Params: map[string]string{"hello": "world", "x-foo": "bar"},
			},
			expectedParams:       map[string]string{"hello": "world"},
			expectedCustomParams: map[string]string{"x-foo": "bar"},
		},
		{
			name: "multiple params, multiple custom params",
			config: &mysql.Config{
				Params: map[string]string{
					"hello": "world",
					"x-foo": "bar",
					"dead":  "beef",
					"x-cat": "hat",
				},
			},
			expectedParams:       map[string]string{"hello": "world", "dead": "beef"},
			expectedCustomParams: map[string]string{"x-foo": "bar", "x-cat": "hat"},
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			customParams, err := extractCustomQueryParams(tc.config)
			if tc.config != nil {
				assert.Equal(t, tc.expectedParams, tc.config.Params,
					"Expected config params have custom params properly removed")
			}
			assert.Equal(t, tc.expectedErr, err, "Expected errors to match")
			assert.Equal(t, tc.expectedCustomParams, customParams,
				"Expected custom params to be properly extracted")
		})
	}
}
|
||||
|
||||
// createTmpCert writes a throwaway self-signed certificate (deterministic
// ed25519 key, seed 0) to a temp file and returns its path. The file is
// removed via t.Cleanup. Any failure aborts the test with t.Fatal.
func createTmpCert(t *testing.T) string {
	tmpCertFile, err := os.CreateTemp("", "migrate_test_cert")
	if err != nil {
		t.Fatal("Failed to create temp cert file:", err)
	}
	t.Cleanup(func() {
		if err := os.Remove(tmpCertFile.Name()); err != nil {
			t.Log("Failed to cleanup temp cert file:", err)
		}
	})

	// Deterministic randomness keeps the generated cert stable across runs.
	r := rand.New(rand.NewSource(0))
	pub, priv, err := ed25519.GenerateKey(r)
	if err != nil {
		t.Fatal("Failed to generate ed25519 key for temp cert file:", err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(0),
	}
	derBytes, err := x509.CreateCertificate(r, &tmpl, &tmpl, pub, priv)
	if err != nil {
		t.Fatal("Failed to generate temp cert file:", err)
	}
	if err := pem.Encode(tmpCertFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
		// Fixed: the message was truncated ("Failed to encode ") and dropped err.
		t.Fatal("Failed to encode temp cert file:", err)
	}
	if err := tmpCertFile.Close(); err != nil {
		t.Fatal("Failed to close temp cert file:", err)
	}
	return tmpCertFile.Name()
}
|
||||
|
||||
// TestURLToMySQLConfig is a table-driven round-trip test: each migrate-style
// URL is parsed with urlToMySQLConfig and re-serialized via FormatDSN, with
// special attention to percent-encoded credentials and custom TLS params.
func TestURLToMySQLConfig(t *testing.T) {
	tmpCertFilename := createTmpCert(t)
	// Path-escape so the filename survives being embedded in a query string.
	tmpCertFilenameEscaped := url.PathEscape(tmpCertFilename)

	testcases := []struct {
		name        string
		urlStr      string
		expectedDSN string // empty string signifies that an error is expected
	}{
		{name: "no user/password", urlStr: "mysql://tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "only user", urlStr: "mysql://username@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "only user - with encoded :",
			urlStr:      "mysql://username%3A@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username:@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "only user - with encoded @",
			urlStr:      "mysql://username%40@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username@@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "user/password", urlStr: "mysql://username:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		// Not supported yet: https://github.com/go-sql-driver/mysql/issues/591
		// {name: "user/password - user with encoded :",
		// 	urlStr: "mysql://username%3A:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
		// 	expectedDSN: "username::password@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "user/password - user with encoded @",
			urlStr:      "mysql://username%40:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username@:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "user/password - password with encoded :",
			urlStr:      "mysql://username:password%3A@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username:password:@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "user/password - password with encoded @",
			urlStr:      "mysql://username:password%40@tcp(127.0.0.1:3306)/myDB?multiStatements=true",
			expectedDSN: "username:password@@tcp(127.0.0.1:3306)/myDB?multiStatements=true"},
		{name: "custom tls",
			urlStr:      "mysql://username:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true&tls=custom&x-tls-ca=" + tmpCertFilenameEscaped,
			expectedDSN: "username:password@tcp(127.0.0.1:3306)/myDB?multiStatements=true&tls=custom&x-tls-ca=" + tmpCertFilenameEscaped},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			config, err := urlToMySQLConfig(tc.urlStr)
			if err != nil {
				t.Fatal("Failed to parse url string:", tc.urlStr, "error:", err)
			}
			dsn := config.FormatDSN()
			if dsn != tc.expectedDSN {
				t.Error("Got unexpected DSN:", dsn, "!=", tc.expectedDSN)
			}
		})
	}
}
|
||||
@@ -0,0 +1,20 @@
|
||||
# neo4j
|
||||
The Neo4j driver (bolt) does not natively support executing multiple statements in a single query. To allow for multiple statements in a single migration, you can use the `x-multi-statement` param.
|
||||
This mode splits the migration text into separately-executed statements by a semi-colon `;`. Thus `x-multi-statement` cannot be used when a statement in the migration contains a string with a semi-colon.
|
||||
The queries **should** run in a single transaction, so partial migrations should not be a concern, but this is untested.
|
||||
|
||||
|
||||
`neo4j://user:password@host:port/`
|
||||
|
||||
| URL Query | WithInstance Config | Description |
|
||||
|------------|---------------------|-------------|
|
||||
| `x-multi-statement` | `MultiStatement` | Enable multiple statements to be run in a single migration (See note above) |
|
||||
| `user` | Contained within `AuthConfig` | The user to sign in as |
|
||||
| `password` | Contained within `AuthConfig` | The user's password |
|
||||
| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) |
|
||||
| `port` | | The port to bind to. (default is 7687) |
|
||||
| | `MigrationsLabel` | Name of the migrations node label |
|
||||
|
||||
## Supported versions
|
||||
|
||||
Only Neo4j v3.5+ is [supported](https://github.com/neo4j/neo4j-go-driver/issues/64#issuecomment-625133600)
|
||||
@@ -0,0 +1,97 @@
|
||||
## Create migrations
|
||||
Let's create nodes called `Users`:
|
||||
```
|
||||
migrate create -ext cypher -dir db/migrations -seq create_user_nodes
|
||||
```
|
||||
If there were no errors, we should have two files available under `db/migrations` folder:
|
||||
- 000001_create_user_nodes.down.cypher
|
||||
- 000001_create_user_nodes.up.cypher
|
||||
|
||||
Note the `cypher` extension that we provided.
|
||||
|
||||
In the `.up.cypher` file let's create the user nodes:
|
||||
```
|
||||
CREATE (u1:User {name: "Peter"})
|
||||
CREATE (u2:User {name: "Paul"})
|
||||
CREATE (u3:User {name: "Mary"})
|
||||
```
|
||||
And in the `.down.cypher` let's delete them:
|
||||
```
|
||||
MATCH (u:User) WHERE u.name IN ["Peter", "Paul", "Mary"] DELETE u
|
||||
```
|
||||
Ideally your migrations should be idempotent. You can read more about idempotency in [getting started](GETTING_STARTED.md#create-migrations)
|
||||
|
||||
## Run migrations
|
||||
```
|
||||
migrate -database ${NEO4J_URL} -path db/migrations up
|
||||
```
|
||||
Let's check if the nodes were created properly by running `bin/cypher-shell -u neo4j -p password`, then `neo4j> MATCH (u:User) RETURN u;`
|
||||
The output you are supposed to see:
|
||||
```
|
||||
+-----------------------------------------------------------------+
|
||||
| u |
|
||||
+-----------------------------------------------------------------+
|
||||
| (:User {name: "Peter"})                                        |
| (:User {name: "Paul"})                                         |
| (:User {name: "Mary"})                                         |
|
||||
+-----------------------------------------------------------------+
|
||||
```
|
||||
Great! Now let's check if running reverse migration also works:
|
||||
```
|
||||
migrate -database ${NEO4J_URL} -path db/migrations down
|
||||
```
|
||||
Make sure to check if your database changed as expected in this case as well.
|
||||
|
||||
## Database transactions
|
||||
|
||||
To show database transactions usage, let's create another set of migrations by running:
|
||||
```
|
||||
migrate create -ext cypher -dir db/migrations -seq add_mood_to_users
|
||||
```
|
||||
Again, it should create for us two migrations files:
|
||||
- 000002_add_mood_to_users.down.cypher
|
||||
- 000002_add_mood_to_users.up.cypher
|
||||
|
||||
In Neo4j, when we want our queries to be done in a transaction, we need to wrap it with `:BEGIN` and `:COMMIT` commands.
|
||||
Migration up:
|
||||
```
|
||||
:BEGIN
|
||||
|
||||
MATCH (u:User)
|
||||
SET u.mood = "Cheery"
|
||||
|
||||
:COMMIT
|
||||
```
|
||||
Migration down:
|
||||
```
|
||||
:BEGIN
|
||||
|
||||
MATCH (u:User)
|
||||
SET u.mood = null
|
||||
|
||||
:COMMIT
|
||||
```
|
||||
|
||||
## Optional: Run migrations within your Go app
|
||||
Here is a very simple app running migrations for the above configuration:
|
||||
```
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/neo4j"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m, err := migrate.New(
|
||||
"file://db/migrations",
|
||||
"neo4j://neo4j:password@localhost:7687/")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := m.Up(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
```
|
||||
+1
@@ -0,0 +1 @@
|
||||
DROP CONSTRAINT ON (m:Movie) ASSERT m.Name IS UNIQUE
|
||||
+1
@@ -0,0 +1 @@
|
||||
CREATE CONSTRAINT ON (m:Movie) ASSERT m.Name IS UNIQUE
|
||||
+2
@@ -0,0 +1,2 @@
|
||||
MATCH (m:Movie)
|
||||
DELETE m
|
||||
+2
@@ -0,0 +1,2 @@
|
||||
CREATE (:Movie {name: "Footloose"})
|
||||
CREATE (:Movie {name: "Ghost"})
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user