I believe I have Fedora configured correctly, but I want to ask if there is something I am forgetting. When I swap out the clustered Fedora for a single unclustered Fedora instance, it works normally with the usual performance. But when using the clustered configuration, it is 10-20 times slower, taking 1-2 minutes for simple actions such as creating a collection.
When I interact directly with the Fedora REST API, it seems to behave normally. I'm wondering: has anyone else had issues like this? Is this an issue with ActiveFedora? And is the known issue with Fedora 4.4 clustering being resolved?
<TCP bind_port="7800"
loopback_separate_thread="true"
loopback="true"
recv_buf_size="${tcp.recv_buf_size:5M}"
send_buf_size="${tcp.send_buf_size:640K}"
max_bundle_size="64K"
max_bundle_timeout="30"
use_send_queues="true"
sock_conn_timeout="300"
timer_type="new3"
timer.min_threads="4"
timer.max_threads="10"
timer.keep_alive_time="3000"
timer.queue_max_size="500"
thread_pool.enabled="true"
thread_pool.min_threads="1"
thread_pool.max_threads="10"
thread_pool.keep_alive_time="5000"
thread_pool.queue_enabled="true"
thread_pool.queue_max_size="10000"
thread_pool.rejection_policy="discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="1"
oob_thread_pool.max_threads="8"
oob_thread_pool.keep_alive_time="5000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="discard"/>
<MPING timeout="1000"
num_initial_members="1"/>
<MERGE2 max_interval="30000"
min_interval="10000"/>
<FD_ALL timeout="150000"/>
<VERIFY_SUSPECT timeout="150000" />
<BARRIER />
<pbcast.NAKACK2 use_mcast_xmit="false"
discard_delivered_msgs="true"/>
<UNICAST timeout="600,900,2500"/>
<pbcast.STABLE stability_delay="2000" desired_avg_gossip="50000"
max_bytes="4M"/>
<pbcast.GMS print_local_addr="true" join_timeout="6000"
view_bundling="true"/>
<MFC max_credits="2M"
min_threshold="0.4"/>
<FRAG2 frag_size="60K" />
<pbcast.STATE_TRANSFER />
</config>
xmlns="urn:infinispan:config:6.0">
<global>
<globalJmxStatistics enabled="true" allowDuplicateDomains="true"/>
<!-- Defines the global settings shared by all caches -->
<transport clusterName="test-cluster">
<properties>
<property name="configurationFile" value="/opt/fedora/etc/jgroups-fcrepo-tcp.xml"/>
</properties>
</transport>
</global>
<default>
<!--
Defines the default behavior for all caches, including those created dynamically (e.g., when a
repository uses a cache that doesn't exist in this configuration).
-->
<clustering mode="distribution">
<sync/>
<l1 enabled="false" lifespan="0" onRehash="false"/>
<hash numOwners="${fcrepo.ispn.numOwners:2}"/>
<stateTransfer chunkSize="100" fetchInMemoryState="true"/>
</clustering>
</default>
<namedCache name="FedoraRepository">
<!--
Our Infinispan cache needs to be transactional. However, we'll also configure it to
use pessimistic locking, which is required whenever applications will be concurrently
updating nodes within the same process. If you're not sure, use pessimistic locking.
-->
<clustering mode="replication">
<sync/>
<stateTransfer chunkSize="100" fetchInMemoryState="true"/>
</clustering>
<locking concurrencyLevel="1000" lockAcquisitionTimeout="15000" useLockStriping="false" />
<deadlockDetection enabled="true" spinDuration="1000"/>
<eviction maxEntries="500" strategy="LIRS" threadPolicy="DEFAULT"/>
<transaction
transactionManagerLookupClass="org.infinispan.transaction.lookup.GenericTransactionManagerLookup"
transactionMode="TRANSACTIONAL" lockingMode="PESSIMISTIC"/>
<!--
Define the cache loaders (i.e., cache stores). Passivation is false because we want *all*
data to be persisted, not just what doesn't fit into memory. Shared is false because there
are no other caches sharing this file store. We set preload to false for lazy loading;
may be improved by preloading and configuring eviction.
We can have multiple cache loaders, which get chained. But we'll define just one.
-->
<persistence passivation="false">
<singleFile shared="false"
preload="false"
fetchPersistentState="true"
purgeOnStartup="false"
location="${fcrepo.ispn.repo.cache:target/FedoraRepository/storage}"/>
</persistence>
</namedCache>
<namedCache name="FedoraRepositoryMetaData">
<!--
Our Infinispan cache needs to be transactional. However, we'll also configure it to
use pessimistic locking, which is required whenever applications will be concurrently
updating nodes within the same process. If you're not sure, use pessimistic locking.
-->
<clustering mode="replication">
<sync/>
<stateTransfer chunkSize="100" fetchInMemoryState="true"/>
</clustering>
<locking concurrencyLevel="1000" lockAcquisitionTimeout="15000" useLockStriping="false" />
<deadlockDetection enabled="true" spinDuration="1000"/>
<eviction maxEntries="500" strategy="LIRS" threadPolicy="DEFAULT"/>
<transaction
transactionManagerLookupClass="org.infinispan.transaction.lookup.GenericTransactionManagerLookup"
transactionMode="TRANSACTIONAL" lockingMode="PESSIMISTIC"/>
<!--
Define the cache loaders (i.e., cache stores). Passivation is false because we want *all*
data to be persisted, not just what doesn't fit into memory. Shared is false because there
are no other caches sharing this file store. We set preload to false for lazy loading;
may be improved by preloading and configuring eviction.
We can have multiple cache loaders, which get chained. But we'll define just one.
-->
<persistence passivation="false">
<singleFile shared="false"
preload="false"
fetchPersistentState="true"
purgeOnStartup="false"
location="${fcrepo.ispn.cache:target/FedoraRepositoryMetaData/storage}"/>
</persistence>
</namedCache>
<namedCache name="FedoraRepositoryBinaryData">
<!--
Our Infinispan cache needs to be transactional. However, we'll also configure it to
use pessimistic locking, which is required whenever applications will be concurrently
updating nodes within the same process. If you're not sure, use pessimistic locking.
-->
<clustering mode="distribution">
<sync replTimeout="${fcrepo.ispn.replication.timeout:10000}" />
<l1 enabled="false" lifespan="0" onRehash="false"/>
<hash numOwners="${fcrepo.ispn.numOwners:2}" numSegments="40"/>
<stateTransfer chunkSize="100" fetchInMemoryState="true"/>
</clustering>
<locking concurrencyLevel="1000" lockAcquisitionTimeout="15000" useLockStriping="false" />
<deadlockDetection enabled="true" spinDuration="1000"/>
<eviction maxEntries="100" strategy="LIRS" threadPolicy="DEFAULT"/>
<transaction
transactionManagerLookupClass="org.infinispan.transaction.lookup.GenericTransactionManagerLookup"
transactionMode="TRANSACTIONAL" lockingMode="PESSIMISTIC"/>
<!--
Define the cache loaders (i.e., cache stores). Passivation is false because we want *all*
data to be persisted, not just what doesn't fit into memory. Shared is false because there
are no other caches sharing this file store. We set preload to false for lazy loading;
may be improved by preloading and configuring eviction.
We can have multiple cache loaders, which get chained. But we'll define just one.
-->
<persistence passivation="false">
<singleFile shared="false"
preload="false"
fetchPersistentState="true"
purgeOnStartup="false"
location="${fcrepo.ispn.binary.cache:target/FedoraRepositoryBinaryData/storage}"/>
</persistence>
</namedCache>
</infinispan>