Docker for Windows v2.1.0.5 ran without any problems, but after recently updating to v2.2.0.0 I found that after the containers run for a few days, the 64 GB Hyper-V Docker VM disk gets completely filled up. My docker-compose.yml is as follows:
version: "2"
services:
  memcached:
    image: memcached:1.5.6
    container_name: seafile-memcached
    restart: unless-stopped
    entrypoint: memcached -m 256
    networks:
      - seafile-net
  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    container_name: seafile-elasticsearch
    restart: unless-stopped
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    mem_limit: 2g
    volumes:
      - C:/Docker/elasticsearch/data:/usr/share/elasticsearch/data
    networks:
      - seafile-net
  seafile:
    image: docker.seafile.top/seafileltd/seafile-pro-mc:latest
    container_name: seafile
    restart: unless-stopped
    ports:
      - 10002:443
    volumes:
      - D:/Docker/seafile/data:/shared
    environment:
      - DB_HOST=192.168.9.13
      - DB_ROOT_PASSWD=password
      - TIME_ZONE=Asia/Shanghai
      - SEAFILE_ADMIN_EMAIL=test@test.com
      - SEAFILE_ADMIN_PASSWORD=password
      - SEAFILE_SERVER_LETSENCRYPT=true
      - SEAFILE_SERVER_HOSTNAME=example.com
    depends_on:
      - memcached
      - elasticsearch
    networks:
      - seafile-net
networks:
  seafile-net:
Since it is hard to keep watching the containers' runtime status, all I could do was trace the problem in C:\Users\Administrator\AppData\Roaming\Docker\log\vm\docker.log, which shows that elasticsearch-with-ik keeps writing error messages to its log. Specifically:
2020-02-01T05:36:58Z docker time="2020-02-01T05:36:58.591593961Z" level=error msg="Failed to log msg \"\" for logger json-file: write /var/lib/docker/containers/727f86ac3975422d8c28c5aab47fa9a85050eaea12d6e5ec729e16b22f950c02/727f86ac3975422d8c28c5aab47fa9a85050eaea12d6e5ec729e16b22f950c02-json.log: no space left on device"
For now I cannot confirm what is causing the elasticsearch container's disk usage to keep growing, which eventually leads to the endlessly repeated "no space left on device" log writes.
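As a stopgap while the root cause is unclear, the json-file log driver's rotation options can at least cap how much a looping error message is able to write. This is only a sketch of how I might amend the elasticsearch service; the 10m / 3-file limits are values I picked arbitrarily and have not verified against this setup:

  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    # cap this container's json-file log at max-file * max-size (~30 MB here),
    # so a repeating error message cannot fill the VM disk by itself
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"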
The docker logs output is below; the WARN entries also repeat endlessly:
[2020-02-01T06:37:16,417][WARN ][o.e.i.c.IndicesClusterStateService] [lx-BZmX] [[repofiles][4]] marking and sending shard failed due to [shard failure, reason [lucene commit failed]]
org.apache.lucene.store.AlreadyClosedException: Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/indices/J8X5pA8RSlez7-tYTm6Bnw/4/index/write.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-01T06:37:16.228098Z))
at org.apache.lucene.store.NativeFSLockFactory$NativeFSLock.ensureValid(NativeFSLockFactory.java:179) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.store.LockValidatingDirectoryWrapper.createOutput(LockValidatingDirectoryWrapper.java:43) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.SegmentInfos.write(SegmentInfos.java:471) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.SegmentInfos.prepareCommit(SegmentInfos.java:775) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.startCommit(IndexWriter.java:4707) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.prepareCommitInternal(IndexWriter.java:3085) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:3244) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3207) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.elasticsearch.index.engine.InternalEngine.commitIndexWriter(InternalEngine.java:1588) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngine.openTranslog(InternalEngine.java:283) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngine.<init>(InternalEngine.java:160) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngineFactory.newReadWriteEngine(InternalEngineFactory.java:25) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.newEngine(IndexShard.java:1602) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.createNewEngine(IndexShard.java:1584) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.internalPerformTranslogRecovery(IndexShard.java:1027) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.performTranslogRecovery(IndexShard.java:987) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.internalRecoverFromStore(StoreRecovery.java:360) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromStore$0(StoreRecovery.java:90) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:257) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.recoverFromStore(StoreRecovery.java:88) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.recoverFromStore(IndexShard.java:1236) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$1(IndexShard.java:1484) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:576) [elasticsearch-5.6.16.jar:5.6.16]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_212]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_212]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_212]
[2020-02-01T06:37:16,420][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [shard failure, reason [lucene commit failed]], failure [AlreadyClosedException[Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/indices/J8X5pA8RSlez7-tYTm6Bnw/4/index/write.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-01T06:37:16.228098Z))]]
org.apache.lucene.store.AlreadyClosedException: Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/indices/J8X5pA8RSlez7-tYTm6Bnw/4/index/write.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-01T06:37:16.228098Z))
at org.apache.lucene.store.NativeFSLockFactory$NativeFSLock.ensureValid(NativeFSLockFactory.java:179) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.store.LockValidatingDirectoryWrapper.createOutput(LockValidatingDirectoryWrapper.java:43) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.SegmentInfos.write(SegmentInfos.java:471) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.SegmentInfos.prepareCommit(SegmentInfos.java:775) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.startCommit(IndexWriter.java:4707) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.prepareCommitInternal(IndexWriter.java:3085) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:3244) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3207) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
at org.elasticsearch.index.engine.InternalEngine.commitIndexWriter(InternalEngine.java:1588) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngine.openTranslog(InternalEngine.java:283) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngine.<init>(InternalEngine.java:160) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.engine.InternalEngineFactory.newReadWriteEngine(InternalEngineFactory.java:25) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.newEngine(IndexShard.java:1602) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.createNewEngine(IndexShard.java:1584) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.internalPerformTranslogRecovery(IndexShard.java:1027) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.performTranslogRecovery(IndexShard.java:987) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.internalRecoverFromStore(StoreRecovery.java:360) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromStore$0(StoreRecovery.java:90) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:257) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.StoreRecovery.recoverFromStore(StoreRecovery.java:88) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.recoverFromStore(IndexShard.java:1236) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$1(IndexShard.java:1484) ~[elasticsearch-5.6.16.jar:5.6.16]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:576) [elasticsearch-5.6.16.jar:5.6.16]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_212]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_212]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_212]
[2020-02-01T06:37:16,496][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,677][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,791][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,849][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[repofiles][3]] ...]).
[2020-02-01T06:37:16,852][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,890][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [YELLOW] to [RED] (reason: [shards failed [[repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4]] ...]).
[2020-02-01T06:37:16,970][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[repofiles][4]] ...]).
[2020-02-01T06:37:17,054][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,060][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [not_analyzed]
[2020-02-01T06:37:17,063][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,063][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,106][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,106][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [not_analyzed]
[2020-02-01T06:37:17,107][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,107][WARN ][o.e.d.i.m.TypeParsers ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,111][INFO ][o.e.c.m.MetaDataMappingService] [lx-BZmX] [repofiles/J8X5pA8RSlez7-tYTm6Bnw] create_mapping [file]
[2020-02-01T06:37:17,182][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:11,404][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:12,314][INFO ][o.e.m.j.JvmGcMonitorService] [lx-BZmX] [gc][young][87][6] duration [900ms], collections [1]/[1.5s], total [900ms]/[2.9s], memory [195.1mb]->[121.7mb]/[1007.3mb], all_pools {[young] [132.7mb]->[433.3kb]/[133.1mb]}{[survivor] [15.6mb]->[16.6mb]/[16.6mb]}{[old] [46.8mb]->[104.6mb]/[857.6mb]}
[2020-02-01T06:38:12,315][WARN ][o.e.m.j.JvmGcMonitorService] [lx-BZmX] [gc][87] overhead, spent [900ms] collecting in the last [1.5s]
[2020-02-01T06:38:19,107][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:22,230][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:39:08,684][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:39:10,186][WARN ][o.e.d.c.ParseField ] Deprecated field [include] used, expected [includes] instead
Searching the forum, I found similar posts saying that elasticsearch.log keeps growing. Is this the same issue?
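If it is elasticsearch.log itself that keeps growing, one thing I could try is mounting the container's log directory out to the host so it can be inspected and pruned from Windows directly. This assumes the image keeps its logs at the default /usr/share/elasticsearch/logs path, which I have not confirmed for this image, and the C:/Docker/elasticsearch/logs host path is just my own naming:

  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    volumes:
      - C:/Docker/elasticsearch/data:/usr/share/elasticsearch/data
      # assumed default log location inside the container; mounting it out
      # makes elasticsearch.log visible (and prunable) from the Windows host
      - C:/Docker/elasticsearch/logs:/usr/share/elasticsearch/logs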
Also, I see that elasticsearch-with-ik is still stuck at version 5.6.16. Isn't it time to update and maintain it?