@helm-charts/bitnami-elasticsearch v4.7.2-0.1.0 • Published 6 years ago

Weekly downloads: 1
License: MIT
Repository: -
Last release: 6 years ago

@helm-charts/bitnami-elasticsearch

A highly scalable open-source full-text search and analytics engine

Field               | Value
Repository Name     | bitnami
Chart Name          | elasticsearch
Chart Version       | 4.7.2
NPM Package Version | 0.1.0

The chart's default values.yaml is reproduced below:
## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
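##
## For example, to make every image pull from a private registry, the global
## value can be set at install time (hypothetical registry name; a sketch, not
## a chart default):
##   helm install --set global.imageRegistry=myregistry.example.com bitnami/elasticsearch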

## Bitnami Elasticsearch image version
## ref: https://hub.docker.com/r/bitnami/elasticsearch/tags/
##
image:
  registry: docker.io
  repository: bitnami/elasticsearch
  tag: 6.7.1
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: Always
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
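  ## For example, such a secret could be created in advance with a command like
  ## the following (hypothetical credentials, shown only as a sketch):
  ##   kubectl create secret docker-registry myRegistryKeySecretName \
  ##     --docker-server=docker.io --docker-username=myuser --docker-password=mypassword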

## Image that performs the sysctl operation
##
sysctlImage:
  enabled: false
  registry: docker.io
  repository: bitnami/minideb
  tag: latest
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: Always
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName

## Elasticsearch cluster name
##
name: elastic

## Comma, semi-colon or space separated list of plugins to install at initialization
## ref: https://github.com/bitnami/bitnami-docker-elasticsearch#environment-variables
##
# plugins:

## Customize elasticsearch configuration
## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
##
# config:

## Elasticsearch master-eligible node parameters
##
master:
  name: master
  replicas: 2
  heapSize: 128m
  antiAffinity: 'soft'
  ## Node Affinity
  # nodeAffinity:
  service:
    ## master-eligible service type
    type: ClusterIP
    ## Elasticsearch transport port
    port: 9300
    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ##
    # nodePort:

    ## Provide any additional annotations which may be required. This can be used to
    ## set the LoadBalancer service type to internal only.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    annotations: {}
    # loadBalancerIP:

  ## Configure resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    requests:
      cpu: 25m
      memory: '256Mi'
  ## Elasticsearch master-eligible Liveness Probe
  livenessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5
  ## Elasticsearch master-eligible Readiness Probe
  readinessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5

## Elasticsearch discovery node parameters
##
discovery:
  name: discovery

## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001

## Elasticsearch coordinating-only node parameters
##
coordinating:
  name: coordinating-only
  replicas: 2
  heapSize: 128m
  antiAffinity: 'soft'
  ## node affinity
  # nodeAffinity:
  service:
    ## coordinating-only service type
    type: ClusterIP
    ## Elasticsearch REST API port
    port: 9200
    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ##
    # nodePort:

    ## Provide any additional annotations which may be required. This can be used to
    ## set the LoadBalancer service type to internal only.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    annotations: {}
    # loadBalancerIP:

  ## Configure resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    requests:
      cpu: 25m
      memory: '256Mi'
  ## Elasticsearch coordinating-only Liveness Probe
  livenessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5
  ## Elasticsearch coordinating-only Readiness Probe
  readinessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5

## Elasticsearch data node parameters
##
data:
  name: data
  replicas: 2
  ## updateStrategy for Elasticsearch Data statefulset
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
  updateStrategy:
    type: RollingUpdate
    # rollingUpdatePartition
  heapSize: 1024m
  antiAffinity: 'soft'
  ## node affinity
  # nodeAffinity:
  ## Configure resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    requests:
      cpu: 25m
      memory: '1152Mi'
  ## Elasticsearch data node Liveness Probe
  livenessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5
  ## Elasticsearch data node Readiness Probe
  readinessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5
  ## Enable persistence using Persistent Volume Claims
  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
  ##
  persistence:
    ## If true, use a Persistent Volume Claim; if false, use emptyDir
    ##
    enabled: true

    ## Persistent Volume Claim annotations
    ##
    annotations: {}

    ## Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"

    ## Persistent Volume Access Mode
    accessModes:
      - ReadWriteOnce

    ## Persistent Volume size
    ##
    size: 8Gi

## Elasticsearch ingest node parameters
##
ingest:
  enabled: false
  name: ingest
  replicas: 2
  heapSize: 128m
  antiAffinity: 'soft'
  ## node affinity
  # nodeAffinity:
  service:
    ## ingest service type
    type: ClusterIP
    ## Elasticsearch transport port
    port: 9300
    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
    ##
    # nodePort:

    ## Provide any additional annotations which may be required. This can be used to
    ## set the LoadBalancer service type to internal only.
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
    ##
    annotations: {}
    # loadBalancerIP:

  ## Configure resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    requests:
      cpu: 25m
      memory: '256Mi'
  ## Elasticsearch ingest node Liveness Probe
  livenessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5
  ## Elasticsearch ingest node Readiness Probe
  readinessProbe:
    enabled: false
  #  initialDelaySeconds: 90
  #  periodSeconds: 10
  #  timeoutSeconds: 5
  #  successThreshold: 1
  #  failureThreshold: 5

## Elasticsearch Prometheus exporter parameters
##
metrics:
  enabled: false
  name: metrics
  image:
    registry: docker.io
    repository: bitnami/elasticsearch-exporter
    tag: 1.0.2
    pullPolicy: Always
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '9108'
  service:
    type: ClusterIP
  resources:
    # requests:
    #   cpu: 25m

Elasticsearch

Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time.

TL;DR

$ helm install bitnami/elasticsearch

Introduction

This chart bootstraps an Elasticsearch deployment on a Kubernetes cluster using the Helm package manager.

Bitnami charts can be used with Kubeapps for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of Bitnami Kubernetes Production Runtime (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.

Prerequisites

  • Kubernetes 1.6+ with Beta APIs enabled
  • PV provisioner support in the underlying infrastructure

Installing the Chart

To install the chart with the release name my-release:

$ helm install --name my-release bitnami/elasticsearch

The command deploys Elasticsearch on the Kubernetes cluster in the default configuration. The configuration section lists the parameters that can be configured during installation.

Tip: List all releases using helm list
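
To verify that the cluster is up, you can port-forward the coordinating-only service and query the REST API. The service name below is an assumption based on the release name my-release and the chart's naming convention; check kubectl get svc for the actual name:

$ kubectl port-forward svc/my-release-elasticsearch-coordinating-only 9200:9200 &
$ curl http://localhost:9200/_cluster/health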

Uninstalling the Chart

To uninstall/delete the my-release release:

$ helm delete my-release

The command removes all the Kubernetes components associated with the chart and deletes the release. To also remove the release record from Helm, use the --purge option:

$ helm delete --purge my-release

Configuration

The following table lists the configurable parameters of the Elasticsearch chart and their default values.

Parameter | Description | Default
global.imageRegistry | Global Docker image registry | nil
global.imagePullSecrets | Global Docker registry secret names as an array | [] (does not add image pull secrets to deployed pods)
image.registry | Elasticsearch image registry | docker.io
image.repository | Elasticsearch image repository | bitnami/elasticsearch
image.tag | Elasticsearch image tag | 6.7.1
image.pullPolicy | Image pull policy | Always
image.pullSecrets | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods)
name | Elasticsearch cluster name | elastic
plugins | Comma, semi-colon or space separated list of plugins to install at initialization | nil
config | Elasticsearch node custom configuration | nil
master.name | Master-eligible node pod name | master
master.replicas | Desired number of Elasticsearch master-eligible nodes | 2
master.heapSize | Master-eligible node heap size | 128m
master.antiAffinity | Master-eligible node pod anti-affinity policy | soft
master.nodeAffinity | Master-eligible node affinity policy | nil
master.service.type | Kubernetes Service type (master-eligible nodes) | ClusterIP
master.service.port | Kubernetes Service port for Elasticsearch transport port (master-eligible nodes) | 9300
master.service.nodePort | Kubernetes Service nodePort (master-eligible nodes) | nil
master.service.annotations | Annotations for master-eligible nodes service | {}
master.service.loadBalancerIP | loadBalancerIP if master-eligible nodes service type is LoadBalancer | nil
master.resources | CPU/Memory resource requests/limits for master-eligible nodes pods | requests: { cpu: "25m", memory: "256Mi" }
master.livenessProbe.enabled | Enable/disable the liveness probe (master-eligible nodes pod) | false
master.livenessProbe.initialDelaySeconds | Delay before liveness probe is initiated (master-eligible nodes pod) | 90
master.livenessProbe.periodSeconds | How often to perform the probe (master-eligible nodes pod) | 10
master.livenessProbe.timeoutSeconds | When the probe times out (master-eligible nodes pod) | 5
master.livenessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | 1
master.livenessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (master-eligible nodes pod) | 5
master.readinessProbe.enabled | Enable/disable the readiness probe (master-eligible nodes pod) | false
master.readinessProbe.initialDelaySeconds | Delay before readiness probe is initiated (master-eligible nodes pod) | 90
master.readinessProbe.periodSeconds | How often to perform the probe (master-eligible nodes pod) | 10
master.readinessProbe.timeoutSeconds | When the probe times out (master-eligible nodes pod) | 5
master.readinessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | 1
master.readinessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (master-eligible nodes pod) | 5
securityContext.enabled | Enable security context | true
securityContext.fsGroup | Group ID for the container | 1001
securityContext.runAsUser | User ID for the container | 1001
discovery.name | Discovery node pod name | discovery
coordinating.name | Coordinating-only node pod name | coordinating-only
coordinating.replicas | Desired number of Elasticsearch coordinating-only nodes | 2
coordinating.heapSize | Coordinating-only node heap size | 128m
coordinating.antiAffinity | Coordinating-only node pod anti-affinity policy | soft
coordinating.nodeAffinity | Coordinating-only node affinity policy | nil
coordinating.service.type | Kubernetes Service type (coordinating-only nodes) | ClusterIP
coordinating.service.port | Kubernetes Service port for REST API (coordinating-only nodes) | 9200
coordinating.service.nodePort | Kubernetes Service nodePort (coordinating-only nodes) | nil
coordinating.service.annotations | Annotations for coordinating-only nodes service | {}
coordinating.service.loadBalancerIP | loadBalancerIP if coordinating-only nodes service type is LoadBalancer | nil
coordinating.resources | CPU/Memory resource requests/limits for coordinating-only nodes pods | requests: { cpu: "25m", memory: "256Mi" }
coordinating.livenessProbe.enabled | Enable/disable the liveness probe (coordinating-only nodes pod) | false
coordinating.livenessProbe.initialDelaySeconds | Delay before liveness probe is initiated (coordinating-only nodes pod) | 90
coordinating.livenessProbe.periodSeconds | How often to perform the probe (coordinating-only nodes pod) | 10
coordinating.livenessProbe.timeoutSeconds | When the probe times out (coordinating-only nodes pod) | 5
coordinating.livenessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | 1
coordinating.livenessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (coordinating-only nodes pod) | 5
coordinating.readinessProbe.enabled | Enable/disable the readiness probe (coordinating-only nodes pod) | false
coordinating.readinessProbe.initialDelaySeconds | Delay before readiness probe is initiated (coordinating-only nodes pod) | 90
coordinating.readinessProbe.periodSeconds | How often to perform the probe (coordinating-only nodes pod) | 10
coordinating.readinessProbe.timeoutSeconds | When the probe times out (coordinating-only nodes pod) | 5
coordinating.readinessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | 1
coordinating.readinessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (coordinating-only nodes pod) | 5
data.name | Data node pod name | data
data.replicas | Desired number of Elasticsearch data nodes | 2
data.updateStrategy.type | Update strategy for Data statefulset | RollingUpdate
data.updateStrategy.rollingUpdatePartition | Partition update strategy for Data statefulset | nil
data.heapSize | Data node heap size | 1024m
data.antiAffinity | Data pod anti-affinity policy | soft
data.nodeAffinity | Data pod node affinity policy | nil
data.resources | CPU/Memory resource requests/limits for data nodes | requests: { cpu: "25m", memory: "1152Mi" }
data.persistence.enabled | Enable persistence using a PersistentVolumeClaim | true
data.persistence.annotations | Persistent Volume Claim annotations | {}
data.persistence.storageClass | Persistent Volume Storage Class | nil
data.persistence.accessModes | Persistent Volume Access Modes | [ReadWriteOnce]
data.persistence.size | Persistent Volume Size | 8Gi
data.livenessProbe.enabled | Enable/disable the liveness probe (data nodes pod) | false
data.livenessProbe.initialDelaySeconds | Delay before liveness probe is initiated (data nodes pod) | 90
data.livenessProbe.periodSeconds | How often to perform the probe (data nodes pod) | 10
data.livenessProbe.timeoutSeconds | When the probe times out (data nodes pod) | 5
data.livenessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | 1
data.livenessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (data nodes pod) | 5
data.readinessProbe.enabled | Enable/disable the readiness probe (data nodes pod) | false
data.readinessProbe.initialDelaySeconds | Delay before readiness probe is initiated (data nodes pod) | 90
data.readinessProbe.periodSeconds | How often to perform the probe (data nodes pod) | 10
data.readinessProbe.timeoutSeconds | When the probe times out (data nodes pod) | 5
data.readinessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | 1
data.readinessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (data nodes pod) | 5
ingest.enabled | Enable ingest nodes | false
ingest.name | Ingest node pod name | ingest
ingest.replicas | Desired number of Elasticsearch ingest nodes | 2
ingest.heapSize | Ingest node heap size | 128m
ingest.antiAffinity | Ingest node pod anti-affinity policy | soft
ingest.nodeAffinity | Ingest node pod affinity policy | nil
ingest.service.type | Kubernetes Service type (ingest nodes) | ClusterIP
ingest.service.port | Kubernetes Service port for Elasticsearch transport port (ingest nodes) | 9300
ingest.service.nodePort | Kubernetes Service nodePort (ingest nodes) | nil
ingest.service.annotations | Annotations for ingest nodes service | {}
ingest.service.loadBalancerIP | loadBalancerIP if ingest nodes service type is LoadBalancer | nil
ingest.resources | CPU/Memory resource requests/limits for ingest nodes pods | requests: { cpu: "25m", memory: "256Mi" }
ingest.livenessProbe.enabled | Enable/disable the liveness probe (ingest nodes pod) | false
ingest.livenessProbe.initialDelaySeconds | Delay before liveness probe is initiated (ingest nodes pod) | 90
ingest.livenessProbe.periodSeconds | How often to perform the probe (ingest nodes pod) | 10
ingest.livenessProbe.timeoutSeconds | When the probe times out (ingest nodes pod) | 5
ingest.livenessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | 1
ingest.livenessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (ingest nodes pod) | 5
ingest.readinessProbe.enabled | Enable/disable the readiness probe (ingest nodes pod) | false
ingest.readinessProbe.initialDelaySeconds | Delay before readiness probe is initiated (ingest nodes pod) | 90
ingest.readinessProbe.periodSeconds | How often to perform the probe (ingest nodes pod) | 10
ingest.readinessProbe.timeoutSeconds | When the probe times out (ingest nodes pod) | 5
ingest.readinessProbe.successThreshold | Minimum consecutive successes for the probe to be considered successful after having failed (ingest nodes pod) | 1
ingest.readinessProbe.failureThreshold | Minimum consecutive failures for the probe to be considered failed after having succeeded (ingest nodes pod) | 5
metrics.enabled | Enable Prometheus exporter | false
metrics.name | Metrics pod name | metrics
metrics.image.registry | Metrics exporter image registry | docker.io
metrics.image.repository | Metrics exporter image repository | bitnami/elasticsearch-exporter
metrics.image.tag | Metrics exporter image tag | 1.0.2
metrics.image.pullPolicy | Metrics exporter image pull policy | Always
metrics.service.type | Metrics exporter endpoint service type | ClusterIP
metrics.resources | Metrics exporter resource requests/limits | requests: { cpu: "25m" }
sysctlImage.enabled | Enable kernel settings modifier image | false
sysctlImage.registry | Kernel settings modifier image registry | docker.io
sysctlImage.repository | Kernel settings modifier image repository | bitnami/minideb
sysctlImage.tag | Kernel settings modifier image tag | latest
sysctlImage.pullPolicy | Kernel settings modifier image pull policy | Always

Specify each parameter using the --set key=value[,key=value] argument to helm install. For example,

$ helm install --name my-release \
  --set name=my-elastic,coordinating.service.port=8080 \
  bitnami/elasticsearch

The above command sets the Elasticsearch cluster name to my-elastic and the coordinating-only nodes' REST API port to 8080.

Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,

$ helm install --name my-release -f values.yaml bitnami/elasticsearch

Tip: You can use the default values.yaml; values-production.yaml provides defaults optimized for production environments.
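
For instance, a minimal custom values file might look like the following (illustrative values only; each key corresponds to a parameter in the table above):

name: my-elastic
master:
  replicas: 3
data:
  heapSize: 2048m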

Persistence

The Bitnami Elasticsearch image stores the Elasticsearch data at the /bitnami/elasticsearch/data path of the container.

By default, the chart mounts a Persistent Volume at this location. The volume is created using dynamic volume provisioning. See the Configuration section to configure the PVC.
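
For example, to bind the data nodes' PVCs to a specific storage class and request a larger volume (the class name is a placeholder; use one that exists in your cluster):

data:
  persistence:
    enabled: true
    storageClass: my-storage-class
    size: 20Gi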

Troubleshooting

Currently, Elasticsearch requires some kernel settings on the host machine to work as expected (most notably, a sufficiently high vm.max_map_count). If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the Elasticsearch system configuration documentation.

You can use a privileged initContainer to change those settings in the kernel by enabling sysctlImage.enabled:

$ helm install --name my-release \
  --set sysctlImage.enabled=true \
  bitnami/elasticsearch
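
For reference, such an init container typically applies settings along these lines (a sketch based on Elasticsearch's documented bootstrap requirements; the exact commands run by the chart's minideb image may differ):

$ sysctl -w vm.max_map_count=262144
$ sysctl -w fs.file-max=65536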

Upgrading

To 3.0.0

Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. Use the workaround below to upgrade from versions prior to 3.0.0. The following example assumes that the release name is elasticsearch:

$ kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
$ kubectl delete statefulset elasticsearch-data --cascade=false

Version history

All of the following package versions were published 6 years ago:

4.7.2-0.1.0, 4.7.1-0.1.0, 4.7.0-0.1.0, 4.6.5-0.1.0, 4.6.4-0.1.0, 4.6.3-0.1.0, 4.6.2-0.1.0, 4.6.1-0.1.0, 4.6.0-0.1.0, 4.5.0-0.1.0, 4.4.0-0.1.0, 4.3.0-0.1.0, 4.2.13-0.1.0, 4.2.12-0.1.0, 4.2.11-0.1.0, 4.2.10-0.1.0, 4.2.9-0.1.0, 4.2.8-0.1.0, 4.2.7-0.1.0, 4.2.6-0.1.0, 4.2.5-0.1.0, 4.2.4-0.1.0, 4.2.3-0.1.0, 4.2.2-0.1.0, 4.2.1-0.1.0, 4.2.0-0.1.0, 4.1.3-0.1.0, 4.1.2-0.1.0, 4.1.1-0.1.0, 4.1.0-0.1.0, 4.0.3-0.1.0, 4.0.2-0.1.0, 4.0.1-0.1.0, 4.0.0-0.1.0, 3.0.0-0.1.0, 2.0.5-0.1.0, 2.0.4-0.1.0, 2.0.3-0.1.0, 2.0.2-0.1.0, 2.0.1-0.1.0, 2.0.0-0.1.0, 1.0.8-0.1.0, 1.0.7-0.1.0, 1.0.6-0.1.0, 1.0.5-0.1.0, 1.0.4-0.1.0, 1.0.3-0.1.0, 1.0.2-0.1.0, 1.0.1-0.1.0, 1.0.0-0.1.0, 0.1.7-0.1.0, 0.1.6-0.1.0, 0.1.5-0.1.0, 0.1.4-0.1.0, 0.1.3-0.1.0, 0.1.2-0.1.0, 0.1.1-0.1.0