struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	/* Small sets fit in the embedded block; larger ones get one page per block. */
	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* Release the pages allocated so far, then the struct itself. */
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
All Quai Network blockchains are braided together, keeping the entire network censorship resistant and secure while creating scalable Proof-of-Work.
Quai allows anyone to participate in network governance by running a node or miner. With thousands of participants distributed across the globe, there is no single party with the ability to modify or turn off the network, ensuring zero network downtime.
Quai Network automatically expands with demand to upwards of 50,000 TPS while keeping fees under $0.01.
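To make the scaling claim concrete, here is a back-of-envelope sketch in C; the per-chain throughput and chain count are placeholder assumptions, chosen only to show how per-chain capacity multiplied across sub networks reaches that order of magnitude.

/* Back-of-envelope capacity model. The numbers are illustrative
 * assumptions, not measured network parameters. */
#include <stdio.h>

int main(void)
{
	unsigned per_chain_tps = 500;   /* assumed throughput of one sub network */
	unsigned chains = 100;          /* the network expands by adding chains */

	/* Aggregate capacity grows with the number of active sub networks. */
	printf("aggregate capacity: ~%u TPS\n", per_chain_tps * chains);
	return 0;
}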
Transactions in Quai Network can be locally confirmed prior to global confirmation, offering high throughput with the shortest possible time to economic finality.
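As an illustrative sketch (the names and levels below are assumptions for exposition, not Quai's actual client code), a transaction's status can be modeled as climbing from local to global confirmation:

/* Hypothetical local-then-global confirmation levels. */
#include <stdbool.h>

enum confirmation_level {
	CONF_NONE,   /* not yet included in any block */
	CONF_LOCAL,  /* confirmed in a sub network block */
	CONF_GLOBAL  /* referenced by the Prime chain: economic finality */
};

struct tx_status {
	unsigned int local_confirmations; /* blocks on top in the sub network */
	bool referenced_by_prime;         /* local history woven into Prime */
};

/* A transaction is usable locally long before the whole network has
 * finalized it; global finality follows once Prime references it. */
static enum confirmation_level confirmation(const struct tx_status *s)
{
	if (s->referenced_by_prime)
		return CONF_GLOBAL;
	if (s->local_confirmations > 0)
		return CONF_LOCAL;
	return CONF_NONE;
}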
All blockchains within Quai Network share Proof-of-Work security through merged mining. Every Quai transaction is eventually confirmed by 100% of network hash power.
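A minimal sketch of how merged mining can share work, assuming a three-level braid and illustrative difficulty targets (neither taken from the real client): one proof-of-work hash is checked against progressively harder targets, so the same mining effort secures every level at once.

/* Hypothetical merged-mining check: one PoW hash, three targets.
 * Layout and numbers are assumptions based on the prose above. */
#include <stdint.h>

enum chain_level { LEVEL_ZONE, LEVEL_REGION, LEVEL_PRIME };

/* Lower numeric target = harder to meet. Prime is the hardest. */
static const uint64_t target[] = {
	[LEVEL_ZONE]   = 1ULL << 48,
	[LEVEL_REGION] = 1ULL << 40,
	[LEVEL_PRIME]  = 1ULL << 32,
};

/* Assumes pow_hash already cleared the zone target. A hash that
 * clears the Prime target also clears Region and Zone, so every
 * chain in the braid shares the aggregate hash power. */
static enum chain_level highest_level(uint64_t pow_hash)
{
	if (pow_hash < target[LEVEL_PRIME])
		return LEVEL_PRIME;
	if (pow_hash < target[LEVEL_REGION])
		return LEVEL_REGION;
	return LEVEL_ZONE;
}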
Parachains inherit security and interoperability by merged mining with Quai Network, and create new incentives for miners and users.
The Prime blockchain acts as the "knot" tying all Quai Network chains together, braiding the sub networks into a single system and facilitating the transfer of data across chains.
Quai's many high-speed sub networks independently and asynchronously process transactions. All sub networks are braided together by the Prime chain, ensuring shared security and interoperability across the network.
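A rough way to picture that topology (field names and the sub network count below are placeholders, not actual network parameters):

/* Hypothetical braid topology: one Prime chain referencing a set of
 * sub networks, each advancing independently and asynchronously. */
#define NUM_SUBNETS 9   /* placeholder fan-out */

struct chain {
	const char *name;
	unsigned long head_height;  /* advances at each chain's own pace */
};

struct braid {
	struct chain prime;                /* the "knot" tying chains together */
	struct chain subnet[NUM_SUBNETS];  /* high-speed sub networks */
	/* Prime's view of each sub network's latest braided-in block;
	 * cross-chain data transfer flows through these references. */
	unsigned long braided_height[NUM_SUBNETS];
};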