Esta página no está disponible actualmente en español. Si lo necesita, póngase en contacto con el servicio de asistencia de Icon (correo electrónico).
Configuration Overview
This is a guide on how to set up Custom Processing Settings properly in a Docker or Kubernetes environment. Below are examples of a docker-compose file and configuration, as well as Kubernetes manifests, with comments explaining the importance of each property.
Docker
Docker Compose
version: "3.0"
services:
  # MongoDB started as a single-node replica set (--replSet), initialised by
  # the mounted init-replication.sh script.
  ipf-mongo:
    image: registry.ipf.iconsolutions.com/ipf-docker-mongodb:latest
    container_name: ipf-mongo
    ports:
      # quoted for consistency with the other services and to avoid YAML
      # misparsing colon-separated digit values as numbers
      - "27017:27017"
    command: --replSet test --bind_ip_all
    healthcheck:
      test: echo 'db.runCommand("ping").ok' | mongo localhost:27017/test --quiet
    volumes:
      - "./config/mongodb/init-replication.sh:/docker-entrypoint-initdb.d/01_init_replication.sh"

  # ZooKeeper instance backing the Kafka broker below.
  zookeeper:
    image: zookeeper
    container_name: zookeeper
    restart: unless-stopped
    ports:
      - "2181:2181"
    healthcheck:
      test: [ "CMD", "zkServer.sh", "status" ]
      interval: 5s
      timeout: 10s
      retries: 10

  kafka:
    image: registry.ipf.iconsolutions.com/kafka-icon:2.13-2.7.1
    container_name: kafka
    restart: always
    ports:
      - "9092:9092"
      - "9093:9093"
    environment:
      - KAFKA_BROKER_ID=0
      # topics are created explicitly via KAFKA_CREATE_TOPICS below
      - KAFKA_AUTO_CREATE_TOPICS_ENABLE=false
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_LOG_RETENTION_MINUTES=10
      - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
      - KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS=1
      # PLAINTEXT listener is advertised to other containers (kafka:9092),
      # LOCAL is advertised to the host machine (localhost:9093)
      - KAFKA_LISTENERS=PLAINTEXT://:9092,LOCAL://:9093
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=LOCAL:PLAINTEXT,PLAINTEXT:PLAINTEXT
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,LOCAL://localhost:9093
      # topic:partitions:replication-factor
      - KAFKA_CREATE_TOPICS=CPS_CRUD_NOTIFICATION:3:1
    healthcheck:
      test: nc -z localhost 9092 || exit -1
      interval: 5s
      timeout: 10s
      retries: 10
    depends_on:
      zookeeper:
        condition: service_healthy

  # The Custom Processing Settings application itself; starts only once
  # MongoDB and Kafka report healthy.
  custom-processing-settings-app:
    image: registry.ipf.iconsolutions.com/custom-processing-settings-application:latest
    container_name: custom-processing-settings-app
    ports:
      # host 8089 -> container 8080 (HTTP), host 5003 -> container 5005 (debug)
      - "8089:8080"
      - "5003:5005"
    user: "${UID:-root}:${GID:-root}"
    environment:
      - IPF_JAVA_ARGS=-Dma.glasnost.orika.writeClassFiles=false -Dma.glasnost.orika.writeSourceFiles=false
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:8080/actuator/health" ]
      interval: 5s
    depends_on:
      ipf-mongo:
        condition: service_healthy
      kafka:
        condition: service_healthy
application.conf
# Mongo connection: the Spring URI falls back to the IPF-level setting
# via an optional HOCON substitution.
ipf.mongodb.url = "mongodb://ipf-mongo:27017/ipf"
spring.data.mongodb.uri = ${?ipf.mongodb.url}

actor-system-name = custom-processing-settings

akka {
  # Static single-node cluster: bootstrap is disabled and the node
  # seeds itself using its own Artery address.
  cluster {
    bootstrap.enabled = false
    seed-nodes = ["akka://"${actor-system-name}"@0.0.0.0:"${akka.remote.artery.canonical.port}]
  }

  # Artery remoting bound to all interfaces on a fixed port.
  remote.artery {
    canonical.hostname = 0.0.0.0
    canonical.port = 55001
    bind.hostname = 0.0.0.0
    bind.port = 55001
  }

  # Kafka producer bootstrap broker (the compose service name).
  kafka.producer.kafka-clients.bootstrap.servers = "kafka:9092"
}
Kubernetes
Deployment Manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: custom-processing-settings
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/path: "/"
    prometheus.io/port: "9001"
spec:
  replicas: 3
  selector:
    matchLabels:
      app: custom-processing-settings
      product: ipfv2
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: "/"
        prometheus.io/port: "9001"
      labels:
        app: custom-processing-settings
        product: ipfv2
    spec:
      imagePullSecrets:
        - name: "registrysecret"
      containers:
        - name: custom-processing-settings
          image: ${docker.registry}/custom-processing-settings-application:latest
          imagePullPolicy: Always
          # NOTE(review): running as root with privilege escalation enabled is
          # permissive; acceptable for an example, but confirm hardening for
          # production deployments.
          securityContext:
            runAsUser: 0
            allowPrivilegeEscalation: true
          ports:
            - name: http
              containerPort: 8080
            - name: debug-port
              containerPort: 5005
            - name: akka-artery
              containerPort: 55001
            - name: akka-management
              containerPort: 8558
            - name: akka-metrics
              containerPort: 9001
          # Both probes target the Akka Management endpoints (port 8558).
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /health/alive
              port: akka-management
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 2
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /health/ready
              port: akka-management
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 2
            successThreshold: 10
            timeoutSeconds: 1
          env:
            - name: "POD_NAME"
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: "IPF_JAVA_ARGS"
              value: "-Dma.glasnost.orika.writeClassFiles=false -Dma.glasnost.orika.writeSourceFiles=false"
            # POD_IP is consumed by application.conf as the Artery
            # canonical hostname so each pod advertises its own address.
            - name: "POD_IP"
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: "KUBERNETES_NAMESPACE"
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          resources:
            requests:
              memory: ${custom-processing-settings.min-mem}
              cpu: ${custom-processing-settings.min-cpu}
          # application.conf and logback.xml are projected from the ConfigMap.
          volumeMounts:
            - mountPath: /custom-processing-settings-application/conf/logback.xml
              name: config-volume
              subPath: logback.xml
            - mountPath: /custom-processing-settings-application/conf/application.conf
              name: config-volume
              subPath: application.conf
      volumes:
        - name: config-volume
          configMap:
            name: custom-processing-settings-cm
        # NOTE(review): the keystore volume is declared but not mounted by any
        # container above — confirm whether a volumeMount is intended.
        - name: keystore
          secret:
            secretName: keystore
Service Manifest
apiVersion: v1
kind: Service
metadata:
  name: custom-processing-settings
  labels:
    product: ipfv2
spec:
  # Targets the pods created by the Deployment of the same name.
  selector:
    app: custom-processing-settings
    product: ipfv2
  ports:
    # Spring actuator / HTTP endpoint, exposed on the standard HTTP port.
    - protocol: TCP
      port: 80
      targetPort: 8080
      name: actuator
    # Prometheus metrics scrape target.
    - protocol: TCP
      port: 9001
      targetPort: 9001
      name: akka-metrics
    # Akka Artery remoting between cluster members.
    - protocol: TCP
      port: 55001
      targetPort: 55001
      name: remoting
    # JVM remote-debug port.
    - protocol: TCP
      port: 5005
      targetPort: 5005
      name: debug
ConfigMap Manifest
apiVersion: v1
kind: ConfigMap
metadata:
  name: custom-processing-settings-cm
data:
  application.conf: |-
    # mongo configuration setup
    # HOCON does not expand ${...} substitutions inside quoted strings, so the
    # reference to ipf.mongodb.url (defined below) must stay unquoted —
    # matching the Docker example above.
    spring.data.mongodb.uri = ${ipf.mongodb.url}
    # akka configuration setup
    akka {
      # Use the Kubernetes API to discover the cluster members
      discovery {
        kubernetes-api {
          pod-label-selector = "app=custom-processing-settings"
        }
      }
      actor.provider = cluster
      cluster {
        # Seed nodes stay empty: Cluster Bootstrap forms the cluster instead.
        seed-nodes = []
        downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
        split-brain-resolver {
          active-strategy = keep-majority
          stable-after = 20s
        }
        # sharding
        sharding {
          handoff-timeout = 8s
          least-shard-allocation-strategy.rebalance-absolute-limit = 20
          rebalance-interval = 2s
          number-of-shards = 10
          passivation {
            strategy = "default-strategy"
            default-strategy.active-entity-limit = 20000
          }
        }
      }
      # use the specific pod IP (injected via the POD_IP env var) as the
      # hostname for each instance
      remote.artery.canonical.hostname = ${POD_IP}
      management {
        health-checks {
          readiness-path = "health/ready"
          liveness-path = "health/alive"
        }
        # use the Kubernetes API to create the cluster
        cluster.bootstrap {
          contact-point-discovery {
            service-name = "custom-processing-settings"
            discovery-method = kubernetes-api
            # default of 1; overridable via the optional env substitution
            required-contact-point-nr = 1
            required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
          }
        }
      }
      # kafka configuration
      kafka {
        producer {
          kafka-clients {
            bootstrap.servers = "kafka:9092"
          }
        }
      }
    }
    ipf {
      # default timeout
      behaviour.retries {
        initial-timeout = 1s
      }
      # ipf mongo set up
      mongodb.url = "mongodb://ipf-mongo:27017/ipf"
    }
  logback.xml: |-
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <appender name="ConsoleAppender" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
          <pattern>
            %d{dd-MM-yyyy HH:mm:ss.SSS} %magenta([%thread]) %highlight(%-5level) %logger{36}.%M - %msg%n
          </pattern>
        </encoder>
        <immediateFlush>false</immediateFlush>
      </appender>
      <!-- Async wrapper around the console appender to avoid blocking. -->
      <appender name="STDOUT" class="ch.qos.logback.classic.AsyncAppender">
        <queueSize>1000</queueSize>
        <discardingThreshold>0</discardingThreshold>
        <appender-ref ref="ConsoleAppender"/>
        <includeCallerData>true</includeCallerData>
      </appender>
      <logger name="com.iconsolutions.testfw.core.service.ExpressionEngine" level="OFF"/>
      <logger name="akka.stream.scaladsl.RestartWithBackoffSource" level="OFF"/>
      <logger name="com.iconsolutions.ipf.core.systemevents.utils" level="DEBUG"/>
      <root level="INFO">
        <appender-ref ref="STDOUT"/>
      </root>
    </configuration>