(docker) elasticsearch-with-ik:5.6.16 keeps consuming disk space

Everything was fine back on Docker for Windows v2.1.0.5, but after the recent update to v2.2.0.0 I found that a few days after the containers start they completely fill the 64 GB Hyper-V Docker VM disk. My docker-compose.yml:

version: "2"

services:

  memcached:
    image: memcached:1.5.6
    container_name: seafile-memcached
    restart: unless-stopped
    entrypoint: memcached -m 256
    networks:
      - seafile-net

  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    container_name: seafile-elasticsearch
    restart: unless-stopped
    environment:
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    mem_limit: 2g
    volumes:
      - C:/Docker/elasticsearch/data:/usr/share/elasticsearch/data
    networks:
      - seafile-net

  seafile:
    image: docker.seafile.top/seafileltd/seafile-pro-mc:latest
    container_name: seafile
    restart: unless-stopped
    ports:
      - 10002:443
    volumes:
      - D:/Docker/seafile/data:/shared
    environment:
      - DB_HOST=192.168.9.13
      - DB_ROOT_PASSWD=password
      - TIME_ZONE=Asia/Shanghai
      - SEAFILE_ADMIN_EMAIL=test@test.com
      - SEAFILE_ADMIN_PASSWORD=password
      - SEAFILE_SERVER_LETSENCRYPT=true
      - SEAFILE_SERVER_HOSTNAME=example.com
    depends_on:
      - memcached
      - elasticsearch
    networks:
      - seafile-net

networks:
  seafile-net:

Since it is hard to watch the containers continuously, all I could pin down was that elasticsearch-with-ik keeps writing error messages into C:\Users\Administrator\AppData\Roaming\Docker\log\vm\docker.log, specifically:

2020-02-01T05:36:58Z docker time="2020-02-01T05:36:58.591593961Z" level=error msg="Failed to log msg \"\" for logger json-file: write /var/lib/docker/containers/727f86ac3975422d8c28c5aab47fa9a85050eaea12d6e5ec729e16b22f950c02/727f86ac3975422d8c28c5aab47fa9a85050eaea12d6e5ec729e16b22f950c02-json.log: no space left on device"

For now I cannot confirm what is making the elasticsearch container keep growing its disk usage until it ends up writing "no space left on device" errors in a loop.
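As a stopgap while the root cause is unclear, it should be possible to cap the json-file logging driver so the container's own *-json.log file cannot fill the VM disk. A minimal sketch to merge into the elasticsearch service above (the 50m/3 rotation values are arbitrary choices of mine, not from the original setup):

services:
  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    logging:
      driver: json-file        # the default driver, made explicit
      options:
        max-size: "50m"        # rotate each container log file at 50 MB
        max-file: "3"          # keep at most 3 rotated files

This only bounds the symptom (the ever-growing container log); the underlying shard-failure loop would still need fixing.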

The docker logs output is below; the WARNs also repeat endlessly:

[2020-02-01T06:37:16,417][WARN ][o.e.i.c.IndicesClusterStateService] [lx-BZmX] [[repofiles][4]] marking and sending shard failed due to [shard failure, reason [lucene commit failed]]
org.apache.lucene.store.AlreadyClosedException: Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/indices/J8X5pA8RSlez7-tYTm6Bnw/4/index/write.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-01T06:37:16.228098Z))
        at org.apache.lucene.store.NativeFSLockFactory$NativeFSLock.ensureValid(NativeFSLockFactory.java:179) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.store.LockValidatingDirectoryWrapper.createOutput(LockValidatingDirectoryWrapper.java:43) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.SegmentInfos.write(SegmentInfos.java:471) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.SegmentInfos.prepareCommit(SegmentInfos.java:775) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.IndexWriter.startCommit(IndexWriter.java:4707) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.IndexWriter.prepareCommitInternal(IndexWriter.java:3085) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:3244) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3207) ~[lucene-core-6.6.1.jar:6.6.1 9aa465a89b64ff2dabe7b4d50c472de32c298683 - varunthacker - 2017-08-29 21:54:39]
        at org.elasticsearch.index.engine.InternalEngine.commitIndexWriter(InternalEngine.java:1588) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.engine.InternalEngine.openTranslog(InternalEngine.java:283) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.engine.InternalEngine.<init>(InternalEngine.java:160) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.engine.InternalEngineFactory.newReadWriteEngine(InternalEngineFactory.java:25) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.newEngine(IndexShard.java:1602) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.createNewEngine(IndexShard.java:1584) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.internalPerformTranslogRecovery(IndexShard.java:1027) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.performTranslogRecovery(IndexShard.java:987) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.StoreRecovery.internalRecoverFromStore(StoreRecovery.java:360) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.StoreRecovery.lambda$recoverFromStore$0(StoreRecovery.java:90) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.StoreRecovery.executeRecovery(StoreRecovery.java:257) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.StoreRecovery.recoverFromStore(StoreRecovery.java:88) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.recoverFromStore(IndexShard.java:1236) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.index.shard.IndexShard.lambda$startRecovery$1(IndexShard.java:1484) ~[elasticsearch-5.6.16.jar:5.6.16]
        at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:576) [elasticsearch-5.6.16.jar:5.6.16]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_212]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_212]
        at java.lang.Thread.run(Thread.java:748) [?:1.8.0_212]
[2020-02-01T06:37:16,420][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [shard failure, reason [lucene commit failed]], failure [AlreadyClosedException[Underlying file changed by an external force at 1970-01-01T00:00:00Z, (lock=NativeFSLock(path=/usr/share/elasticsearch/data/nodes/0/indices/J8X5pA8RSlez7-tYTm6Bnw/4/index/write.lock,impl=sun.nio.ch.FileLockImpl[0:9223372036854775807 exclusive valid],creationTime=2020-02-01T06:37:16.228098Z))]]
        (same AlreadyClosedException stack trace as above)
[2020-02-01T06:37:16,496][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,677][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,791][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,849][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[repofiles][3]] ...]).
[2020-02-01T06:37:16,852][WARN ][o.e.c.a.s.ShardStateAction] [lx-BZmX] [repofiles][4] received shard failed for shard id [[repofiles][4]], allocation id [eTKGnNmBS2WwJosA9Dk0Gw], primary term [0], message [master {lx-BZmX}{lx-BZmXtSNOqHQP_ljZUpA}{LkZ6ZkkJTlaV7QqYkZQS9Q}{127.0.0.1}{127.0.0.1:9300} has not removed previously failed shard. resending shard failure]
[2020-02-01T06:37:16,890][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [YELLOW] to [RED] (reason: [shards failed [[repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4], [repofiles][4]] ...]).
[2020-02-01T06:37:16,970][INFO ][o.e.c.r.a.AllocationService] [lx-BZmX] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[repofiles][4]] ...]).
[2020-02-01T06:37:17,054][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,060][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [not_analyzed]
[2020-02-01T06:37:17,063][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,063][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,106][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,106][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [not_analyzed]
[2020-02-01T06:37:17,107][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,107][WARN ][o.e.d.i.m.TypeParsers    ] Expected a boolean [true/false] for property [index] but got [analyzed]
[2020-02-01T06:37:17,111][INFO ][o.e.c.m.MetaDataMappingService] [lx-BZmX] [repofiles/J8X5pA8RSlez7-tYTm6Bnw] create_mapping [file]
[2020-02-01T06:37:17,182][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:11,404][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:12,314][INFO ][o.e.m.j.JvmGcMonitorService] [lx-BZmX] [gc][young][87][6] duration [900ms], collections [1]/[1.5s], total [900ms]/[2.9s], memory [195.1mb]->[121.7mb]/[1007.3mb], all_pools {[young] [132.7mb]->[433.3kb]/[133.1mb]}{[survivor] [15.6mb]->[16.6mb]/[16.6mb]}{[old] [46.8mb]->[104.6mb]/[857.6mb]}
[2020-02-01T06:38:12,315][WARN ][o.e.m.j.JvmGcMonitorService] [lx-BZmX] [gc][87] overhead, spent [900ms] collecting in the last [1.5s]
[2020-02-01T06:38:19,107][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:38:22,230][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:39:08,684][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead
[2020-02-01T06:39:10,186][WARN ][o.e.d.c.ParseField       ] Deprecated field [include] used, expected [includes] instead

Searching the forum I found a similar thread saying that elasticsearch.log keeps growing. Is that the same issue?

Also, I see elasticsearch-with-ik is still stuck on version 5.6.16; isn't it time to update and maintain it?

Right now, on docker-compose up, the elasticsearch log goes straight to Yellow status:

[2020-02-01T07:52:45,472][INFO ][o.e.n.Node               ] [] initializing ...                                                                                                                         
[2020-02-01T07:52:45,643][INFO ][o.e.e.NodeEnvironment    ] [Fh46k0I] using [1] data paths, mounts [[/usr/share/elasticsearch/data (grpcfuse)]], net usable_space [72.9gb], net total_space [110.9gb], spins? [possibly], types [fuse.grpcfuse]                                                                                                                                                                 
[2020-02-01T07:52:45,643][INFO ][o.e.e.NodeEnvironment    ] [Fh46k0I] heap size [1007.3mb], compressed ordinary object pointers [true]                                                                  
[2020-02-01T07:52:45,798][INFO ][o.e.n.Node               ] node name [Fh46k0I] derived from node ID [Fh46k0InS_GF8a2G4qx66A]; set [node.name] to override                                              
[2020-02-01T07:52:45,798][INFO ][o.e.n.Node               ] version[5.6.16], pid[1], build[3a740d1/2019-03-13T15:33:36.565Z], OS[Linux/4.19.76-linuxkit/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_212/25.212-b01]                                                                                                                                                                           
[2020-02-01T07:52:45,798][INFO ][o.e.n.Node               ] JVM arguments [-Xms2g, -Xmx2g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -Djdk.io.permissionsUseCanonicalPath=true, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j.skipJansi=true, -XX:+HeapDumpOnOutOfMemoryError, -Xms1g, -Xmx1g, -Des.path.home=/usr/share/elasticsearch]                                                                                                                                                                                           
[2020-02-01T07:52:48,049][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [aggs-matrix-stats]                                                                                                 
[2020-02-01T07:52:48,052][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [ingest-common]                                                                                                     
[2020-02-01T07:52:48,054][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [lang-expression]                                                                                                   
[2020-02-01T07:52:48,054][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [lang-groovy]                                                                                                       
[2020-02-01T07:52:48,054][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [lang-mustache]                                                                                                     
[2020-02-01T07:52:48,055][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [lang-painless]                                                                                                     
[2020-02-01T07:52:48,055][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [parent-join]                                                                                                       
[2020-02-01T07:52:48,055][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [percolator]                                                                                                        
[2020-02-01T07:52:48,056][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [reindex]                                                                                                           
[2020-02-01T07:52:48,056][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [transport-netty3]                                                                                                  
[2020-02-01T07:52:48,056][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded module [transport-netty4]                                                                                                  
[2020-02-01T07:52:48,064][INFO ][o.e.p.PluginsService     ] [Fh46k0I] loaded plugin [analysis-ik]                                                                                                       
[2020-02-01T07:52:52,397][INFO ][o.e.d.DiscoveryModule    ] [Fh46k0I] using discovery type [zen]                                                                                                        
[2020-02-01T07:52:53,789][INFO ][o.e.n.Node               ] initialized                                                                                                                                 
[2020-02-01T07:52:53,791][INFO ][o.e.n.Node               ] [Fh46k0I] starting ...                                                                                                                      
[2020-02-01T07:52:53,990][INFO ][o.e.t.TransportService   ] [Fh46k0I] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}                                                                
[2020-02-01T07:52:57,075][INFO ][o.e.c.s.ClusterService   ] [Fh46k0I] new_master {Fh46k0I}{Fh46k0InS_GF8a2G4qx66A}{3cAMAMAsTwKSlcsLTkQAuw}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)                                                                                                                                                                                   
[2020-02-01T07:52:57,131][INFO ][o.e.h.n.Netty4HttpServerTransport] [Fh46k0I] publish_address {172.24.0.2:9200}, bound_addresses {0.0.0.0:9200}                                                         
[2020-02-01T07:52:57,131][INFO ][o.e.n.Node               ] [Fh46k0I] started                                                                                                                           
[2020-02-01T07:52:57,177][INFO ][o.w.a.d.Monitor          ] try load config from /usr/share/elasticsearch/config/analysis-ik/IKAnalyzer.cfg.xml                                                         
[2020-02-01T07:52:57,179][INFO ][o.w.a.d.Monitor          ] try load config from /usr/share/elasticsearch/plugins/ik/config/IKAnalyzer.cfg.xml                                                          
[2020-02-01T07:52:58,201][INFO ][o.e.g.GatewayService     ] [Fh46k0I] recovered [2] indices into cluster_state                                                                                          
[2020-02-01T07:53:00,834][INFO ][o.e.c.r.a.AllocationService] [Fh46k0I] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[repofiles][3]] ...]).

After waiting the ~10 minutes for the Seafile container to trigger search indexing, a series of WARN errors appeared (a selection):

[2020-02-01T08:03:25,991][WARN ][o.e.g.MetaStateService   ] [Fh46k0I] [[repofiles/-VZcV_x3QmyaSJSf2PUyRA]]: failed to write index state
[2020-02-01T08:03:32,630][WARN ][o.e.g.GatewayAllocator$InternalPrimaryShardAllocator] [Fh46k0I] [repofiles][1]: failed to list shard for shard_started on node [Fh46k0InS_GF8a2G4qx66A]
org.elasticsearch.action.FailedNodeException: Failed node [Fh46k0InS_GF8a2G4qx66A]

Update: after switching elasticsearch's data storage to an internal Docker volume, the problem is solved.
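For anyone hitting the same thing, the change amounts to replacing the Windows bind mount with a Docker-managed named volume, roughly like this (the volume name es-data is just illustrative):

services:
  elasticsearch:
    image: seafileltd/elasticsearch-with-ik:5.6.16
    volumes:
      - es-data:/usr/share/elasticsearch/data   # named volume replaces the C:/Docker/... bind mount

volumes:
  es-data:

The index data then lives inside the Docker VM's own filesystem, so Lucene's file locks and timestamps are no longer mediated by the Windows file share.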

My guess is that this is yet another problem with Docker for Windows accessing the Windows filesystem through its share (permissions or file metadata: the data path is mounted via grpcfuse, and the AlreadyClosedException above even reports the lock file as changed at the 1970 epoch). This was a serious problem back when I ran Nextcloud as well.