/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at
 * trunk/opends/resource/legal-notices/OpenDS.LICENSE
 * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at
 * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
 * add the following below this CDDL HEADER, with the fields enclosed
 * by brackets "[]" replaced with your own identifying information:
 *      Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 *      Copyright 2006-2008 Sun Microsystems, Inc.
 */
package org.opends.server.backends;


import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.zip.Deflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.crypto.Mac;

import org.opends.messages.Message;
import org.opends.server.admin.Configuration;
import org.opends.server.admin.std.server.SchemaBackendCfg;
import org.opends.server.admin.server.ConfigurationChangeListener;
import org.opends.server.api.AlertGenerator;
import org.opends.server.api.Backend;
import org.opends.server.api.ClientConnection;
import org.opends.server.api.MatchingRule;
import org.opends.server.config.ConfigEntry;
import org.opends.server.config.ConfigException;
import org.opends.server.core.AddOperation;
import org.opends.server.core.DeleteOperation;
import org.opends.server.core.DirectoryServer;
import org.opends.server.core.ModifyOperation;
import org.opends.server.core.ModifyDNOperation;
import org.opends.server.core.SchemaConfigManager;
import org.opends.server.core.SearchOperation;
import org.opends.server.loggers.ErrorLogger;
import org.opends.server.loggers.debug.DebugTracer;
import org.opends.server.protocols.asn1.ASN1OctetString;
import org.opends.server.schema.AttributeTypeSyntax;
import org.opends.server.schema.DITContentRuleSyntax;
import org.opends.server.schema.DITStructureRuleSyntax;
import org.opends.server.schema.GeneralizedTimeSyntax;
import org.opends.server.schema.MatchingRuleUseSyntax;
import org.opends.server.schema.NameFormSyntax;
import org.opends.server.schema.ObjectClassSyntax;
import org.opends.server.types.*;
import org.opends.server.util.DynamicConstants;
import org.opends.server.util.LDIFException;
import org.opends.server.util.LDIFReader;
import org.opends.server.util.LDIFWriter;
import org.opends.server.util.Validator;

import static org.opends.messages.BackendMessages.*;
import static org.opends.messages.ConfigMessages.*;
import static org.opends.messages.SchemaMessages.*;
import static org.opends.server.config.ConfigConstants.*;
import static org.opends.server.loggers.debug.DebugLogger.*;
import static org.opends.server.loggers.ErrorLogger.*;
import static org.opends.server.schema.SchemaConstants.*;
import static org.opends.server.util.ServerConstants.*;
import static org.opends.server.util.StaticUtils.*;


/**
 * This class defines a backend to hold the Directory Server schema
 * information.  It is a kind of meta-backend in that it doesn't actually hold
 * any data but rather dynamically generates the schema entry whenever it is
 * requested.
 */
public class SchemaBackend
     extends Backend
     implements ConfigurationChangeListener<SchemaBackendCfg>, AlertGenerator
{
  /**
   * The tracer object for the debug logger.
   */
  private static final DebugTracer TRACER = getTracer();


  /**
   * The fully-qualified name of this class.
   */
  private static final String CLASS_NAME =
       "org.opends.server.backends.SchemaBackend";


  private static final String CONFIG_SCHEMA_ELEMENTS_FILE = "02-config.ldif";


  // The set of user-defined attributes that will be included in the schema
  // entry.
  private ArrayList<Attribute> userDefinedAttributes;

  // The attribute type that will be used to include the defined attribute
  // types.
  private AttributeType attributeTypesType;

  // The attribute type that will be used to hold the schema creation
  // timestamp.
  private AttributeType createTimestampType;

  // The attribute type that will be used to hold the schema creator's name.
  private AttributeType creatorsNameType;

  // The attribute type that will be used to include the defined DIT content
  // rules.
  private AttributeType ditContentRulesType;

  // The attribute type that will be used to include the defined DIT structure
  // rules.
  private AttributeType ditStructureRulesType;

  // The attribute type that will be used to include the defined attribute
  // syntaxes.
  private AttributeType ldapSyntaxesType;

  // The attribute type that will be used to include the defined matching
  // rules.
  private AttributeType matchingRulesType;

  // The attribute type that will be used to include the defined matching rule
  // uses.
  private AttributeType matchingRuleUsesType;

  // The attribute that will be used to hold the schema modifier's name.
  private AttributeType modifiersNameType;

  // The attribute type that will be used to hold the schema modification
  // timestamp.
  private AttributeType modifyTimestampType;

  // The attribute type that will be used to include the defined object
  // classes.
  private AttributeType objectClassesType;

  // The attribute type that will be used to include the defined name forms.
  private AttributeType nameFormsType;

  // The value containing the DN of the user we'll say created the
  // configuration.
  private AttributeValue creatorsName;

  // The value containing the DN of the last user to modify the configuration.
  private AttributeValue modifiersName;

  // The timestamp that will be used for the schema creation time.
  private AttributeValue createTimestamp;

  // The timestamp that will be used for the latest schema modification time.
  private AttributeValue modifyTimestamp;

  // Indicates whether the attributes of the schema entry should always be
  // treated as user attributes even if they are defined as operational.
  private boolean showAllAttributes;

  // The DN of the configuration entry for this backend.
  private DN configEntryDN;

  // The current configuration state.
  private SchemaBackendCfg currentConfig;

  // The set of base DNs for this backend.
  private DN[] baseDNs;

  // The set of objectclasses that will be used in the schema entry.
  private HashMap<ObjectClass,String> schemaObjectClasses;

  // The set of supported controls for this backend.
  private HashSet<String> supportedControls;

  // The set of supported features for this backend.
  private HashSet<String> supportedFeatures;

  // The time that the schema was last modified.
  private long modifyTime;

  // Regular expression used to strip minimum upper bound value from
  // syntax Attribute Type Description. The value looks like: {count}.
  private String stripMinUpperBoundRegEx = "\\{\\d+\\}";



  /**
   * Creates a new backend with the provided information.  All backend
   * implementations must implement a default constructor that use
   * <CODE>super()</CODE> to invoke this constructor.
   */
  public SchemaBackend()
  {
    super();

    // Perform all initialization in initializeBackend.
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void configureBackend(Configuration config)
         throws ConfigException
  {
    // Make sure that a configuration entry was provided.  If not, then we will
    // not be able to complete initialization.
    if (config == null)
    {
      Message message = ERR_SCHEMA_CONFIG_ENTRY_NULL.get();
      throw new ConfigException(message);
    }

    Validator.ensureTrue(config instanceof SchemaBackendCfg);
    SchemaBackendCfg cfg = (SchemaBackendCfg)config;
    ConfigEntry configEntry = DirectoryServer.getConfigEntry(cfg.dn());

    configEntryDN = configEntry.getDN();

    // Get all of the attribute types that we will use for schema elements.
    attributeTypesType =
         DirectoryServer.getAttributeType(ATTR_ATTRIBUTE_TYPES_LC, true);
    objectClassesType =
         DirectoryServer.getAttributeType(ATTR_OBJECTCLASSES_LC, true);
    matchingRulesType =
         DirectoryServer.getAttributeType(ATTR_MATCHING_RULES_LC, true);
    ldapSyntaxesType =
         DirectoryServer.getAttributeType(ATTR_LDAP_SYNTAXES_LC, true);
    ditContentRulesType =
         DirectoryServer.getAttributeType(ATTR_DIT_CONTENT_RULES_LC, true);
    ditStructureRulesType =
         DirectoryServer.getAttributeType(ATTR_DIT_STRUCTURE_RULES_LC, true);
    matchingRuleUsesType =
         DirectoryServer.getAttributeType(ATTR_MATCHING_RULE_USE_LC, true);
    nameFormsType = DirectoryServer.getAttributeType(ATTR_NAME_FORMS_LC, true);

    // Initialize the lastmod attributes.
    creatorsNameType =
         DirectoryServer.getAttributeType(OP_ATTR_CREATORS_NAME_LC, true);
    createTimestampType =
         DirectoryServer.getAttributeType(OP_ATTR_CREATE_TIMESTAMP_LC, true);
    modifiersNameType =
         DirectoryServer.getAttributeType(OP_ATTR_MODIFIERS_NAME_LC, true);
    modifyTimestampType =
         DirectoryServer.getAttributeType(OP_ATTR_MODIFY_TIMESTAMP_LC, true);

    // Construct the set of objectclasses to include in the schema entry.
    schemaObjectClasses = new LinkedHashMap<ObjectClass,String>(3);
    schemaObjectClasses.put(DirectoryServer.getTopObjectClass(), OC_TOP);

    ObjectClass subentryOC = DirectoryServer.getObjectClass(OC_LDAP_SUBENTRY_LC,
                                                            true);
    schemaObjectClasses.put(subentryOC, OC_LDAP_SUBENTRY);

    ObjectClass subschemaOC = DirectoryServer.getObjectClass(OC_SUBSCHEMA,
                                                             true);
    schemaObjectClasses.put(subschemaOC, OC_SUBSCHEMA);


    // Define empty sets for the supported controls and features.
    supportedControls = new HashSet<String>(0);
    supportedFeatures = new HashSet<String>(0);


    configEntryDN = configEntry.getDN();

    DN[] baseDNs = new DN[cfg.getBaseDN().size()];
    cfg.getBaseDN().toArray(baseDNs);
    this.baseDNs = baseDNs;

    creatorsName = new AttributeValue(creatorsNameType, baseDNs[0].toString());
    modifiersName =
         new AttributeValue(modifiersNameType, baseDNs[0].toString());

    long createTime = DirectoryServer.getSchema().getOldestModificationTime();
    createTimestamp =
         GeneralizedTimeSyntax.createGeneralizedTimeValue(createTime);

    long modifyTime = DirectoryServer.getSchema().getYoungestModificationTime();
    modifyTimestamp =
         GeneralizedTimeSyntax.createGeneralizedTimeValue(modifyTime);


    // Get the set of user-defined attributes for the configuration entry.  Any
    // attributes that we don't recognize will be included directly in the
    // schema entry.
    userDefinedAttributes = new ArrayList<Attribute>();
    for (List<Attribute> attrs :
         configEntry.getEntry().getUserAttributes().values())
    {
      for (Attribute a : attrs)
      {
        if (! isSchemaConfigAttribute(a))
        {
          userDefinedAttributes.add(a);
        }
      }
    }
    for (List<Attribute> attrs :
         configEntry.getEntry().getOperationalAttributes().values())
    {
      for (Attribute a : attrs)
      {
        if (! isSchemaConfigAttribute(a))
        {
          userDefinedAttributes.add(a);
        }
      }
    }


    // Determine whether to show all attributes.
    showAllAttributes = cfg.isShowAllAttributes();


    currentConfig = cfg;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void initializeBackend()
         throws ConfigException, InitializationException
  {
    // Register each of the suffixes with the Directory Server.  Also, register
    // the first one as the schema base.
    DirectoryServer.setSchemaDN(baseDNs[0]);
    for (int i=0; i < baseDNs.length; i++)
    {
      try
      {
        DirectoryServer.registerBaseDN(baseDNs[i], this, true);
      }
      catch (Exception e)
      {
        if (debugEnabled())
        {
          TRACER.debugCaught(DebugLogLevel.ERROR, e);
        }

        Message message = ERR_BACKEND_CANNOT_REGISTER_BASEDN.get(
            baseDNs[i].toString(), getExceptionMessage(e));
        throw new InitializationException(message, e);
      }
    }


    // Identify any differences that may exist between the concatenated schema
    // file from the last online modification and the current schema files.  If
    // there are any differences, then they should be from making changes to the
    // schema files with the server offline.
    try
    {
      // First, generate lists of elements from the current schema.
      LinkedHashSet<String> newATs = new LinkedHashSet<String>();
      LinkedHashSet<String> newOCs = new LinkedHashSet<String>();
      LinkedHashSet<String> newNFs = new LinkedHashSet<String>();
      LinkedHashSet<String> newDCRs = new LinkedHashSet<String>();
      LinkedHashSet<String> newDSRs = new LinkedHashSet<String>();
      LinkedHashSet<String> newMRUs = new LinkedHashSet<String>();
      Schema.genConcatenatedSchema(newATs, newOCs, newNFs, newDCRs, newDSRs,
                                   newMRUs);

      // Next, generate lists of elements from the previous concatenated schema.
      // If there isn't a previous concatenated schema, then use the base
      // schema for the current revision.
      String concatFilePath;
      File configFile = new File(DirectoryServer.getConfigFile());
      File configDirectory = configFile.getParentFile();
      File upgradeDirectory = new File(configDirectory, "upgrade");
      File concatFile = new File(upgradeDirectory,
                                 SCHEMA_CONCAT_FILE_NAME);
      if (concatFile.exists())
      {
        concatFilePath = concatFile.getAbsolutePath();
      }
      else
      {
        concatFile = new File(upgradeDirectory,
                              SCHEMA_BASE_FILE_NAME_WITHOUT_REVISION +
                              DynamicConstants.REVISION_NUMBER);
        if (concatFile.exists())
        {
          concatFilePath = concatFile.getAbsolutePath();
        }
        else
        {
          String runningUnitTestsStr =
               System.getProperty(PROPERTY_RUNNING_UNIT_TESTS);
          if ((runningUnitTestsStr != null) &&
              runningUnitTestsStr.equalsIgnoreCase("true"))
          {
            Schema.writeConcatenatedSchema();
            concatFile = new File(upgradeDirectory, SCHEMA_CONCAT_FILE_NAME);
            concatFilePath = concatFile.getAbsolutePath();
          }
          else
          {
            Message message = ERR_SCHEMA_CANNOT_FIND_CONCAT_FILE.
                get(upgradeDirectory.getAbsolutePath(), SCHEMA_CONCAT_FILE_NAME,
                    concatFile.getName());
            throw new InitializationException(message);
          }
        }
      }

      LinkedHashSet<String> oldATs = new LinkedHashSet<String>();
      LinkedHashSet<String> oldOCs = new LinkedHashSet<String>();
      LinkedHashSet<String> oldNFs = new LinkedHashSet<String>();
      LinkedHashSet<String> oldDCRs = new LinkedHashSet<String>();
      LinkedHashSet<String> oldDSRs = new LinkedHashSet<String>();
      LinkedHashSet<String> oldMRUs = new LinkedHashSet<String>();
      Schema.readConcatenatedSchema(concatFilePath, oldATs, oldOCs, oldNFs,
                                    oldDCRs, oldDSRs, oldMRUs);

      // Create a list of modifications and add any differences between the old
      // and new schema into them.
      LinkedList<Modification> mods = new LinkedList<Modification>();
      Schema.compareConcatenatedSchema(oldATs, newATs, attributeTypesType,
                                       mods);
      Schema.compareConcatenatedSchema(oldOCs, newOCs, objectClassesType, mods);
      Schema.compareConcatenatedSchema(oldNFs, newNFs, nameFormsType, mods);
      Schema.compareConcatenatedSchema(oldDCRs, newDCRs, ditContentRulesType,
                                       mods);
      Schema.compareConcatenatedSchema(oldDSRs, newDSRs, ditStructureRulesType,
                                       mods);
      Schema.compareConcatenatedSchema(oldMRUs, newMRUs, matchingRuleUsesType,
                                       mods);
      if (! mods.isEmpty())
      {
        DirectoryServer.setOfflineSchemaChanges(mods);

        // Write a new concatenated schema file with the most recent information
        // so we don't re-find these same changes on the next startup.
        Schema.writeConcatenatedSchema();
      }
    }
    catch (InitializationException ie)
    {
      throw ie;
    }
    catch (Exception e)
    {
      if (debugEnabled())
      {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }

      Message message = ERR_SCHEMA_ERROR_DETERMINING_SCHEMA_CHANGES.get(
          getExceptionMessage(e));
      ErrorLogger.logError(message);
    }


    // Register with the Directory Server as a configurable component.
    currentConfig.addSchemaChangeListener(this);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void finalizeBackend()
  {
    currentConfig.removeSchemaChangeListener(this);

    for (DN baseDN : baseDNs)
    {
      try
      {
        DirectoryServer.deregisterBaseDN(baseDN);
      }
      catch (Exception e)
      {
        if (debugEnabled())
        {
          TRACER.debugCaught(DebugLogLevel.ERROR, e);
        }
      }
    }
  }



  /**
   * Indicates whether the provided attribute is one that is used in the
   * configuration of this backend.
   *
   * @param  attribute  The attribute for which to make the determination.
   *
   * @return  <CODE>true</CODE> if the provided attribute is one that is used in
   *          the configuration of this backend, <CODE>false</CODE> if not.
   */
  private boolean isSchemaConfigAttribute(Attribute attribute)
  {
    AttributeType attrType = attribute.getAttributeType();
    if (attrType.hasName(ATTR_SCHEMA_ENTRY_DN.toLowerCase()) ||
        attrType.hasName(ATTR_BACKEND_ENABLED.toLowerCase()) ||
        attrType.hasName(ATTR_BACKEND_CLASS.toLowerCase()) ||
        attrType.hasName(ATTR_BACKEND_ID.toLowerCase()) ||
        attrType.hasName(ATTR_BACKEND_BASE_DN.toLowerCase()) ||
        attrType.hasName(ATTR_BACKEND_WRITABILITY_MODE.toLowerCase()) ||
        attrType.hasName(ATTR_SCHEMA_SHOW_ALL_ATTRIBUTES.toLowerCase()) ||
        attrType.hasName(ATTR_COMMON_NAME) ||
        attrType.hasName(OP_ATTR_CREATORS_NAME_LC) ||
        attrType.hasName(OP_ATTR_CREATE_TIMESTAMP_LC) ||
        attrType.hasName(OP_ATTR_MODIFIERS_NAME_LC) ||
        attrType.hasName(OP_ATTR_MODIFY_TIMESTAMP_LC))
    {
      return true;
    }

    return false;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public DN[] getBaseDNs()
  {
    return baseDNs;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public long getEntryCount()
  {
    // There is always only a single entry in this backend.
    return 1;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean isLocal()
  {
    // For the purposes of this method, this is a local backend.
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean isIndexed(AttributeType attributeType, IndexType indexType)
  {
    // All searches in this backend will always be considered indexed.
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public ConditionResult hasSubordinates(DN entryDN)
         throws DirectoryException
  {
    return ConditionResult.FALSE;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public long numSubordinates(DN entryDN, boolean subtree)
         throws DirectoryException
  {
    return 0L;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public Entry getEntry(DN entryDN)
         throws DirectoryException
  {
    // If the requested entry was one of the schema entries, then create and
    // return it.
    DN[] dnArray = baseDNs;
    for (DN baseDN : dnArray)
    {
      if (entryDN.equals(baseDN))
      {
        return getSchemaEntry(entryDN, false);
      }
    }


    // There is never anything below the schema entries, so we will return null.
    return null;
  }



  /**
   * Generates and returns a schema entry for the Directory Server.
   *
   * @param  entryDN            The DN to use for the generated entry.
   * @param  includeSchemaFile  A boolean indicating if the X-SCHEMA-FILE
   *                            extension should be used when generating
   *                            the entry.
   *
   * @return  The schema entry that was generated.
   */
  public Entry getSchemaEntry(DN entryDN, boolean includeSchemaFile)
  {
    LinkedHashMap<AttributeType,List<Attribute>> userAttrs =
         new LinkedHashMap<AttributeType,List<Attribute>>();

    LinkedHashMap<AttributeType,List<Attribute>> operationalAttrs =
         new LinkedHashMap<AttributeType,List<Attribute>>();


    // Add the RDN attribute(s) for the provided entry.
    RDN rdn = entryDN.getRDN();
    if (rdn != null)
    {
      int numAVAs = rdn.getNumValues();
      for (int i=0; i < numAVAs; i++)
      {
        LinkedHashSet<AttributeValue> valueSet =
             new LinkedHashSet<AttributeValue>(1);
        valueSet.add(rdn.getAttributeValue(i));

        AttributeType attrType = rdn.getAttributeType(i);
        String attrName = rdn.getAttributeName(i);
        Attribute a = new Attribute(attrType, attrName, valueSet);
        ArrayList<Attribute> attrList = new ArrayList<Attribute>(1);
        attrList.add(a);

        if (attrType.isOperational())
        {
          operationalAttrs.put(attrType, attrList);
        }
        else
        {
          userAttrs.put(attrType, attrList);
        }
      }
    }

    Schema schema = DirectoryServer.getSchema();

    // Add the "attributeTypes" attribute.
    LinkedHashSet<AttributeValue> valueSet =
         DirectoryServer.getAttributeTypeSet();

    // Add the file name to the description of the attribute type if this
    // was requested by the caller.
    if (includeSchemaFile)
    {
      LinkedHashSet<AttributeValue> newValueSet =
           new LinkedHashSet<AttributeValue>(valueSet.size());

      for (AttributeValue value : valueSet)
      {
        try
        {
          // Build a new attribute from this value,
          // get the File name from this attribute, build a new attribute
          // including this file name.
          AttributeType attrType = AttributeTypeSyntax.decodeAttributeType(
              value.getValue(), schema, false);
          attrType = DirectoryServer.getAttributeType(attrType.getOID());

          newValueSet.add(new AttributeValue(
              attributeTypesType, attrType.getDefinitionWithFileName()));
        }
        catch (DirectoryException e)
        {
          newValueSet.add(value);
        }
      }
      valueSet = newValueSet;
    }

    Attribute attr;
    if (AttributeTypeSyntax.isStripSyntaxMinimumUpperBound())
      attr = stripMinUpperBoundValues(valueSet);
    else
      attr = new Attribute(attributeTypesType, ATTR_ATTRIBUTE_TYPES,
                           valueSet);
    ArrayList<Attribute> attrList = new ArrayList<Attribute>(1);
    attrList.add(attr);
    if (attributeTypesType.isOperational() && (! showAllAttributes))
    {
      operationalAttrs.put(attributeTypesType, attrList);
    }
    else
    {
      userAttrs.put(attributeTypesType, attrList);
    }

    // Add the "objectClasses" attribute.
    valueSet = DirectoryServer.getObjectClassSet();

    // Add the file name to the description if this was requested by the
    // caller.
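    // For illustration only, a value generated with the file name included
    // might look roughly like
    //   ( 2.5.6.0 NAME 'top' ... X-SCHEMA-FILE '00-core.ldif' )
    // where the X-SCHEMA-FILE extension names the schema file that defined
    // the element (the definition and file name shown here are abbreviated
    // examples, not taken from this code).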
    if (includeSchemaFile)
    {
      LinkedHashSet<AttributeValue> newValueSet =
           new LinkedHashSet<AttributeValue>(valueSet.size());

      for (AttributeValue value : valueSet)
      {
        try
        {
          // Build a new attribute from this value,
          // get the File name from this attribute, build a new attribute
          // including this file name.
          ObjectClass oc = ObjectClassSyntax.decodeObjectClass(
              value.getValue(), schema, false);
          oc = DirectoryServer.getObjectClass(oc.getOID());
          newValueSet.add(new AttributeValue(
              objectClassesType, oc.getDefinitionWithFileName()));
        }
        catch (DirectoryException e)
        {
          newValueSet.add(value);
        }
      }
      valueSet = newValueSet;
    }

    attr = new Attribute(objectClassesType, ATTR_OBJECTCLASSES, valueSet);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(attr);
    if (objectClassesType.isOperational() && (! showAllAttributes))
    {
      operationalAttrs.put(objectClassesType, attrList);
    }
    else
    {
      userAttrs.put(objectClassesType, attrList);
    }


    // Add the "matchingRules" attribute.
    valueSet = DirectoryServer.getMatchingRuleSet();
    attr = new Attribute(matchingRulesType, ATTR_MATCHING_RULES, valueSet);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(attr);
    if (matchingRulesType.isOperational() && (! showAllAttributes))
    {
      operationalAttrs.put(matchingRulesType, attrList);
    }
    else
    {
      userAttrs.put(matchingRulesType, attrList);
    }


    // Add the "ldapSyntaxes" attribute.
    valueSet = DirectoryServer.getAttributeSyntaxSet();
    attr = new Attribute(ldapSyntaxesType, ATTR_LDAP_SYNTAXES, valueSet);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(attr);

    // Note that we intentionally ignore showAllAttributes for attribute
    // syntaxes, name forms, matching rule uses, DIT content rules, and DIT
    // structure rules because those attributes aren't allowed in the subschema
    // objectclass, and treating them as user attributes would cause schema
    // updates to fail.  This means that you'll always have to explicitly
    // request these attributes in order to be able to see them.
    if (ldapSyntaxesType.isOperational())
    {
      operationalAttrs.put(ldapSyntaxesType, attrList);
    }
    else
    {
      userAttrs.put(ldapSyntaxesType, attrList);
    }


    // If there are any name forms defined, then add them.
    valueSet = DirectoryServer.getNameFormSet();
    if (! valueSet.isEmpty())
    {
      attr = new Attribute(nameFormsType, ATTR_NAME_FORMS, valueSet);
      attrList = new ArrayList<Attribute>(1);
      attrList.add(attr);
      if (nameFormsType.isOperational())
      {
        operationalAttrs.put(nameFormsType, attrList);
      }
      else
      {
        userAttrs.put(nameFormsType, attrList);
      }
    }


    // If there are any DIT content rules defined, then add them.
    valueSet = DirectoryServer.getDITContentRuleSet();
    if (! valueSet.isEmpty())
    {
      attr = new Attribute(ditContentRulesType, ATTR_DIT_CONTENT_RULES,
                           valueSet);
      attrList = new ArrayList<Attribute>(1);
      attrList.add(attr);
      if (ditContentRulesType.isOperational())
      {
        operationalAttrs.put(ditContentRulesType, attrList);
      }
      else
      {
        userAttrs.put(ditContentRulesType, attrList);
      }
    }


    // If there are any DIT structure rules defined, then add them.
    valueSet = DirectoryServer.getDITStructureRuleSet();
    if (! valueSet.isEmpty())
    {
      attr = new Attribute(ditStructureRulesType, ATTR_DIT_STRUCTURE_RULES,
                           valueSet);
      attrList = new ArrayList<Attribute>(1);
      attrList.add(attr);
      if (ditStructureRulesType.isOperational())
      {
        operationalAttrs.put(ditStructureRulesType, attrList);
      }
      else
      {
        userAttrs.put(ditStructureRulesType, attrList);
      }
    }


    // If there are any matching rule uses defined, then add them.
    valueSet = DirectoryServer.getMatchingRuleUseSet();
    if (! valueSet.isEmpty())
    {
      attr = new Attribute(matchingRuleUsesType, ATTR_MATCHING_RULE_USE,
                           valueSet);
      attrList = new ArrayList<Attribute>(1);
      attrList.add(attr);
      if (matchingRuleUsesType.isOperational())
      {
        operationalAttrs.put(matchingRuleUsesType, attrList);
      }
      else
      {
        userAttrs.put(matchingRuleUsesType, attrList);
      }
    }


    // Add the lastmod attributes.
    valueSet = new LinkedHashSet<AttributeValue>(1);
    valueSet.add(creatorsName);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(new Attribute(creatorsNameType, OP_ATTR_CREATORS_NAME,
                               valueSet));
    operationalAttrs.put(creatorsNameType, attrList);

    valueSet = new LinkedHashSet<AttributeValue>(1);
    valueSet.add(createTimestamp);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(new Attribute(createTimestampType, OP_ATTR_CREATE_TIMESTAMP,
                               valueSet));
    operationalAttrs.put(createTimestampType, attrList);

    if (DirectoryServer.getSchema().getYoungestModificationTime() != modifyTime)
    {
      synchronized (this)
      {
        modifyTime = DirectoryServer.getSchema().getYoungestModificationTime();
        modifyTimestamp =
             GeneralizedTimeSyntax.createGeneralizedTimeValue(modifyTime);
      }
    }

    valueSet = new LinkedHashSet<AttributeValue>(1);
    valueSet.add(modifiersName);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(new Attribute(modifiersNameType, OP_ATTR_MODIFIERS_NAME,
                               valueSet));
    operationalAttrs.put(modifiersNameType, attrList);

    valueSet = new LinkedHashSet<AttributeValue>(1);
    valueSet.add(modifyTimestamp);
    attrList = new ArrayList<Attribute>(1);
    attrList.add(new Attribute(modifyTimestampType, OP_ATTR_MODIFY_TIMESTAMP,
                               valueSet));
    operationalAttrs.put(modifyTimestampType, attrList);

    // Add the extra attributes.
    Map<String, Attribute> attributes =
         DirectoryServer.getSchema().getExtraAttributes();
    for (Attribute attribute : attributes.values())
    {
      attrList = new ArrayList<Attribute>(1);
      attrList.add(attribute);
      operationalAttrs.put(attribute.getAttributeType(), attrList);
    }

    // Add all the user-defined attributes.
    for (Attribute a : userDefinedAttributes)
    {
      AttributeType type = a.getAttributeType();

      if (type.isOperational())
      {
        List<Attribute> attrs = operationalAttrs.get(type);
        if (attrs == null)
        {
          attrs = new ArrayList<Attribute>();
          attrs.add(a);
          operationalAttrs.put(type, attrs);
        }
        else
        {
          attrs.add(a);
        }
      }
      else
      {
        List<Attribute> attrs = userAttrs.get(type);
        if (attrs == null)
        {
          attrs = new ArrayList<Attribute>();
          attrs.add(a);
          userAttrs.put(type, attrs);
        }
        else
        {
          attrs.add(a);
        }
      }
    }


    // Construct and return the entry.
    Entry e = new Entry(entryDN, schemaObjectClasses, userAttrs,
                        operationalAttrs);
    e.processVirtualAttributes();
    return e;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean entryExists(DN entryDN)
         throws DirectoryException
  {
    // The specified DN must be one of the specified schema DNs.
    DN[] baseArray = baseDNs;
    for (DN baseDN : baseArray)
    {
      if (entryDN.equals(baseDN))
      {
        return true;
      }
    }

    return false;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void addEntry(Entry entry, AddOperation addOperation)
         throws DirectoryException
  {
    Message message =
        ERR_SCHEMA_ADD_NOT_SUPPORTED.get(String.valueOf(entry.getDN()));
    throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void deleteEntry(DN entryDN, DeleteOperation deleteOperation)
         throws DirectoryException
  {
    Message message =
        ERR_SCHEMA_DELETE_NOT_SUPPORTED.get(String.valueOf(entryDN));
    throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void replaceEntry(Entry entry, ModifyOperation modifyOperation)
         throws DirectoryException
  {
    // Make sure that the authenticated user has the necessary UPDATE_SCHEMA
    // privilege.
    ClientConnection clientConnection = modifyOperation.getClientConnection();
    if (! clientConnection.hasPrivilege(Privilege.UPDATE_SCHEMA,
                                        modifyOperation))
    {
      Message message = ERR_SCHEMA_MODIFY_INSUFFICIENT_PRIVILEGES.get();
      throw new DirectoryException(ResultCode.INSUFFICIENT_ACCESS_RIGHTS,
                                   message);
    }


    ArrayList<Modification> mods =
         new ArrayList<Modification>(modifyOperation.getModifications());
    if (mods.isEmpty())
    {
      // There aren't any modifications, so we don't need to do anything.
      return;
    }

    Schema newSchema = DirectoryServer.getSchema().duplicate();
    TreeSet<String> modifiedSchemaFiles = new TreeSet<String>();

    int pos = -1;
    Iterator<Modification> iterator = mods.iterator();
    while (iterator.hasNext())
    {
      Modification m = iterator.next();
      pos++;

      // Determine the type of modification to perform.  We will support add and
      // delete operations in the schema, and we will also support the ability
      // to add a schema element that already exists and treat it as a
      // replacement of that existing element.
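      // As a purely illustrative sketch of a request that ends up here (the
      // OID and attribute name below are made up, using the documentation OID
      // arc), a client could send a modification such as:
      //
      //   dn: cn=schema
      //   changetype: modify
      //   add: attributeTypes
      //   attributeTypes: ( 1.3.6.1.4.1.32473.1.1 NAME 'exampleAttr'
      //     EQUALITY caseIgnoreMatch
      //     SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )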
      Attribute a = m.getAttribute();
      AttributeType at = a.getAttributeType();
      switch (m.getModificationType())
      {
        case ADD:
          LinkedHashSet<AttributeValue> values = a.getValues();
          if (values.isEmpty())
          {
            continue;
          }

          if (at.equals(attributeTypesType))
          {
            for (AttributeValue v : values)
            {
              AttributeType type;
              try
              {
                type = AttributeTypeSyntax.decodeAttributeType(v.getValue(),
                                                               newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_ATTRTYPE.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addAttributeType(type, newSchema, modifiedSchemaFiles);
            }
          }
          else if (at.equals(objectClassesType))
          {
            for (AttributeValue v : values)
            {
              ObjectClass oc;
              try
              {
                oc = ObjectClassSyntax.decodeObjectClass(v.getValue(),
                                                         newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_OBJECTCLASS.
                    get(v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addObjectClass(oc, newSchema, modifiedSchemaFiles);
            }
          }
          else if (at.equals(nameFormsType))
          {
            for (AttributeValue v : values)
            {
              NameForm nf;
              try
              {
                nf = NameFormSyntax.decodeNameForm(v.getValue(), newSchema,
                                                   false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_NAME_FORM.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addNameForm(nf, newSchema, modifiedSchemaFiles);
            }
          }
          else if (at.equals(ditContentRulesType))
          {
            for (AttributeValue v : values)
            {
              DITContentRule dcr;
              try
              {
                dcr = DITContentRuleSyntax.decodeDITContentRule(v.getValue(),
                                                                newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_DCR.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addDITContentRule(dcr, newSchema, modifiedSchemaFiles);
            }
          }
          else if (at.equals(ditStructureRulesType))
          {
            for (AttributeValue v : values)
            {
              DITStructureRule dsr;
              try
              {
                dsr = DITStructureRuleSyntax.decodeDITStructureRule(
                    v.getValue(), newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_DSR.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addDITStructureRule(dsr, newSchema, modifiedSchemaFiles);
            }
          }
          else if (at.equals(matchingRuleUsesType))
          {
            for (AttributeValue v : values)
            {
              MatchingRuleUse mru;
              try
              {
                mru = MatchingRuleUseSyntax.decodeMatchingRuleUse(v.getValue(),
                                                                  newSchema,
                                                                  false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_MR_USE.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              addMatchingRuleUse(mru, newSchema, modifiedSchemaFiles);
            }
          }
          else
          {
            Message message =
                ERR_SCHEMA_MODIFY_UNSUPPORTED_ATTRIBUTE_TYPE.get(a.getName());
            throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                                         message);
          }

          break;


        case DELETE:
          values = a.getValues();
          if (values.isEmpty())
          {
            Message message =
                ERR_SCHEMA_MODIFY_DELETE_NO_VALUES.get(a.getName());
            throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                                         message);
          }

          if (at.equals(attributeTypesType))
          {
            for (AttributeValue v : values)
            {
              AttributeType type;
              try
              {
                type = AttributeTypeSyntax.decodeAttributeType(v.getValue(),
                                                               newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_ATTRTYPE.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeAttributeType(type, newSchema, mods, pos,
                                  modifiedSchemaFiles);
            }
          }
          else if (at.equals(objectClassesType))
          {
            for (AttributeValue v : values)
            {
              ObjectClass oc;
              try
              {
                oc = ObjectClassSyntax.decodeObjectClass(v.getValue(),
                                                         newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_OBJECTCLASS.
                    get(v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeObjectClass(oc, newSchema, mods, pos, modifiedSchemaFiles);
            }
          }
          else if (at.equals(nameFormsType))
          {
            for (AttributeValue v : values)
            {
              NameForm nf;
              try
              {
                nf = NameFormSyntax.decodeNameForm(v.getValue(), newSchema,
                                                   false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_NAME_FORM.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeNameForm(nf, newSchema, mods, pos, modifiedSchemaFiles);
            }
          }
          else if (at.equals(ditContentRulesType))
          {
            for (AttributeValue v : values)
            {
              DITContentRule dcr;
              try
              {
                dcr = DITContentRuleSyntax.decodeDITContentRule(v.getValue(),
                                                                newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_DCR.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeDITContentRule(dcr, newSchema, mods, pos,
                                   modifiedSchemaFiles);
            }
          }
          else if (at.equals(ditStructureRulesType))
          {
            for (AttributeValue v : values)
            {
              DITStructureRule dsr;
              try
              {
                dsr = DITStructureRuleSyntax.decodeDITStructureRule(
                    v.getValue(), newSchema, false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_DSR.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeDITStructureRule(dsr, newSchema, mods, pos,
                                     modifiedSchemaFiles);
            }
          }
          else if (at.equals(matchingRuleUsesType))
          {
            for (AttributeValue v : values)
            {
              MatchingRuleUse mru;
              try
              {
                mru = MatchingRuleUseSyntax.decodeMatchingRuleUse(v.getValue(),
                                                                  newSchema,
                                                                  false);
              }
              catch (DirectoryException de)
              {
                if (debugEnabled())
                {
                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
                }

                Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_MR_USE.get(
                    v.getStringValue(), de.getMessageObject());
                throw new DirectoryException(
                    ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
              }

              removeMatchingRuleUse(mru, newSchema, mods, pos,
                                    modifiedSchemaFiles);
            }
          }
          else
          {
            Message message =
                ERR_SCHEMA_MODIFY_UNSUPPORTED_ATTRIBUTE_TYPE.get(a.getName());
            throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                                         message);
          }

          break;


        default:
          if ((!m.isInternal()) &&
              (!modifyOperation.isSynchronizationOperation()))
          {
            Message message = ERR_SCHEMA_INVALID_MODIFICATION_TYPE.get(
                String.valueOf(m.getModificationType()));
            throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                                         message);
          }
          else
          {
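            // This branch is only reached for internal or synchronization
            // operations using other modification types: the attribute is
            // preserved as an "extra" schema attribute and the user schema
            // elements file is flagged for rewriting.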
            newSchema.addExtraAttribute(at.getNameOrOID(), a);
            modifiedSchemaFiles.add(FILE_USER_SCHEMA_ELEMENTS);
          }
      }
    }


    // If we've gotten here, then everything looks OK, re-write all the
    // modified Schema Files.
    updateSchemaFiles(newSchema, modifiedSchemaFiles);

    // Finally set DirectoryServer to use the new Schema.
    DirectoryServer.setSchema(newSchema);


    DN authzDN = modifyOperation.getAuthorizationDN();
    if (authzDN == null)
    {
      authzDN = DN.nullDN();
    }

    modifiersName = new AttributeValue(modifiersNameType, authzDN.toString());
    modifyTimestamp = GeneralizedTimeSyntax.createGeneralizedTimeValue(
                           System.currentTimeMillis());
  }



  /**
   * Re-write all schema files using the provided new Schema and list of
   * modified files.
   *
   * @param newSchema            The new schema that should be used.
   *
   * @param modifiedSchemaFiles  The list of files that should be modified.
   *
   * @throws DirectoryException  When the new file cannot be written.
   */
  private void updateSchemaFiles(
               Schema newSchema, TreeSet<String> modifiedSchemaFiles)
          throws DirectoryException
  {
    // We'll re-write all
    // impacted schema files by first creating them in a temporary location
    // and then replacing the existing schema files with the new versions.
    // If all that goes successfully, then activate the new schema.
    HashMap<String,File> tempSchemaFiles = new HashMap<String,File>();
    try
    {
      for (String schemaFile : modifiedSchemaFiles)
      {
        File tempSchemaFile = writeTempSchemaFile(newSchema, schemaFile);
        tempSchemaFiles.put(schemaFile, tempSchemaFile);
      }

      installSchemaFiles(tempSchemaFiles);
    }
    catch (DirectoryException de)
    {
      if (debugEnabled())
      {
        TRACER.debugCaught(DebugLogLevel.ERROR, de);
      }

      throw de;
    }
    catch (Exception e)
    {
      if (debugEnabled())
      {
        TRACER.debugCaught(DebugLogLevel.ERROR, e);
      }

      Message message =
          ERR_SCHEMA_MODIFY_CANNOT_WRITE_NEW_SCHEMA.get(getExceptionMessage(e));
      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
                                   message, e);
    }
    finally
    {
      cleanUpTempSchemaFiles(tempSchemaFiles);
    }


    // Create a single file with all of the concatenated schema information
    // that we can use on startup to detect whether the schema files have been
    // edited with the server offline.
    Schema.writeConcatenatedSchema();
  }



  /**
   * Handles all processing required for adding the provided attribute type to
   * the given schema, replacing an existing type if necessary, and ensuring
   * all other metadata is properly updated.
   *
   * @param  attributeType        The attribute type to add or replace in the
   *                              server schema.
   * @param  schema               The schema to which the attribute type should
   *                              be added.
   * @param  modifiedSchemaFiles  The names of the schema files containing
   *                              schema elements that have been updated as part
   *                              of the schema modification.
   *
   * @throws  DirectoryException  If a problem occurs while attempting to add
   *                              the provided attribute type to the server
   *                              schema.
   */
  private void addAttributeType(AttributeType attributeType, Schema schema,
                                Set<String> modifiedSchemaFiles)
          throws DirectoryException
  {
    // First, see if the specified attribute type already exists.  We'll check
    // the OID and all of the names, which means that it's possible there could
    // be more than one match (although if there is, then we'll refuse the
    // operation).
    AttributeType existingType =
         schema.getAttributeType(attributeType.getOID());
    for (String name : attributeType.getNormalizedNames())
    {
      AttributeType t = schema.getAttributeType(name);
      if (t == null)
      {
        continue;
      }
      else if (existingType == null)
      {
        existingType = t;
      }
      else if (existingType != t)
      {
        // NOTE:  We really do want to use "!=" instead of "! t.equals()"
        // because we want to check whether it's the same object instance, not
        // just a logical equivalent.
        Message message = ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_ATTRTYPE.
            get(attributeType.getNameOrOID(), existingType.getNameOrOID(),
                t.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the new attribute type doesn't reference an undefined
    // or OBSOLETE superior attribute type.
    AttributeType superiorType = attributeType.getSuperiorType();
    if (superiorType != null)
    {
      if (! schema.hasAttributeType(superiorType.getOID()))
      {
        Message message = ERR_SCHEMA_MODIFY_UNDEFINED_SUPERIOR_ATTRIBUTE_TYPE.
            get(attributeType.getNameOrOID(), superiorType.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
      else if (superiorType.isObsolete())
      {
        Message message = ERR_SCHEMA_MODIFY_OBSOLETE_SUPERIOR_ATTRIBUTE_TYPE.
            get(attributeType.getNameOrOID(), superiorType.getNameOrOID());
        throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
      }
    }


    // Make sure that none of the associated matching rules are marked OBSOLETE.
    MatchingRule mr = attributeType.getEqualityMatchingRule();
    if ((mr != null) && mr.isObsolete())
    {
      Message message = ERR_SCHEMA_MODIFY_ATTRTYPE_OBSOLETE_MR.get(
          attributeType.getNameOrOID(), mr.getNameOrOID());
      throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
    }

    mr = attributeType.getOrderingMatchingRule();
    if ((mr != null) && mr.isObsolete())
    {
      Message message = ERR_SCHEMA_MODIFY_ATTRTYPE_OBSOLETE_MR.get(
          attributeType.getNameOrOID(), mr.getNameOrOID());
      throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
    }

    mr = attributeType.getSubstringMatchingRule();
    if ((mr != null) && mr.isObsolete())
    {
      Message message = ERR_SCHEMA_MODIFY_ATTRTYPE_OBSOLETE_MR.get(
          attributeType.getNameOrOID(), mr.getNameOrOID());
      throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
    }

    mr = attributeType.getApproximateMatchingRule();
    if ((mr != null) && mr.isObsolete())
    {
      Message message = ERR_SCHEMA_MODIFY_ATTRTYPE_OBSOLETE_MR.get(
          attributeType.getNameOrOID(), mr.getNameOrOID());
      throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
    }


    // If there is no existing type, then we're adding a new attribute.
    // Otherwise, we're replacing an existing one.
    if (existingType == null)
    {
      schema.registerAttributeType(attributeType, false);
      String schemaFile = attributeType.getSchemaFile();
      if ((schemaFile == null) || (schemaFile.length() == 0))
      {
        schemaFile = FILE_USER_SCHEMA_ELEMENTS;
        attributeType.setSchemaFile(schemaFile);
      }

      modifiedSchemaFiles.add(schemaFile);
    }
    else
    {
      schema.deregisterAttributeType(existingType);
      schema.registerAttributeType(attributeType, false);
      schema.rebuildDependentElements(existingType);

      if ((attributeType.getSchemaFile() == null) ||
          (attributeType.getSchemaFile().length() == 0))
      {
        String schemaFile = existingType.getSchemaFile();
        if ((schemaFile == null) || (schemaFile.length() == 0))
        {
          schemaFile = FILE_USER_SCHEMA_ELEMENTS;
        }

        attributeType.setSchemaFile(schemaFile);
        modifiedSchemaFiles.add(schemaFile);
      }
      else
      {
        String newSchemaFile = attributeType.getSchemaFile();
        String oldSchemaFile = existingType.getSchemaFile();
        if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile))
        {
          modifiedSchemaFiles.add(newSchemaFile);
        }
        else
        {
          modifiedSchemaFiles.add(newSchemaFile);
          modifiedSchemaFiles.add(oldSchemaFile);
        }
      }
    }
  }



  /**
   * Handles all processing required to remove the provided attribute type from
   * the server schema, ensuring all other metadata is properly updated.  Note
   * that this method will first check to see whether the same attribute type
   * will be later added to the server schema with an updated definition, and if
   * so then the removal will be ignored because the later add will be handled
   * as a replace.  If the attribute type will not be replaced with a new
   * definition, then this method will ensure that there are no other schema
   * elements that depend on the attribute type before allowing it to be
   * removed.
   *
   * @param  attributeType        The attribute type to remove from the server
   *                              schema.
   * @param  schema               The schema from which the attribute type
   *                              should be removed.
   * @param  modifications        The full set of modifications to be processed
   *                              against the server schema.
   * @param  currentPosition      The position of the modification currently
   *                              being performed.
   * @param  modifiedSchemaFiles  The names of the schema files containing
   *                              schema elements that have been updated as part
   *                              of the schema modification.
   *
   * @throws  DirectoryException  If a problem occurs while attempting to remove
   *                              the provided attribute type from the server
   *                              schema.
   */
  private void removeAttributeType(AttributeType attributeType, Schema schema,
                                   ArrayList<Modification> modifications,
                                   int currentPosition,
                                   Set<String> modifiedSchemaFiles)
          throws DirectoryException
  {
    // See if the specified attribute type is actually defined in the server
    // schema.  If not, then fail.
    AttributeType removeType = schema.getAttributeType(attributeType.getOID());
    if ((removeType == null) || (! removeType.equals(attributeType)))
    {
      Message message = ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_ATTRIBUTE_TYPE.get(
          attributeType.getNameOrOID());
      throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
    }


    // See if there is another modification later to add the attribute type back
    // into the schema.  If so, then it's a replace and we should ignore the
    // remove because adding it back will handle the replace.
    for (int i=currentPosition+1; i < modifications.size(); i++)
    {
      Modification m = modifications.get(i);
      Attribute a = m.getAttribute();

      if ((m.getModificationType() != ModificationType.ADD) ||
          (! a.getAttributeType().equals(attributeTypesType)))
      {
        continue;
      }

      for (AttributeValue v : a.getValues())
      {
        AttributeType at;
        try
        {
          at = AttributeTypeSyntax.decodeAttributeType(v.getValue(), schema,
                                                       true);
        }
        catch (DirectoryException de)
        {
          if (debugEnabled())
          {
            TRACER.debugCaught(DebugLogLevel.ERROR, de);
          }

          Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_ATTRTYPE.get(
              v.getStringValue(), de.getMessageObject());
          throw new DirectoryException(
              ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, de);
        }

        if (attributeType.getOID().equals(at.getOID()))
        {
          // We found a match where the attribute type is added back later, so
          // we don't need to do anything else here.
          return;
        }
      }
    }


    // Make sure that the attribute type isn't used as the superior type for
    // any other attributes.
    for (AttributeType at : schema.getAttributeTypes().values())
    {
      AttributeType superiorType = at.getSuperiorType();
      if ((superiorType != null) && superiorType.equals(removeType))
      {
        Message message = ERR_SCHEMA_MODIFY_REMOVE_AT_SUPERIOR_TYPE.get(
            removeType.getNameOrOID(), superiorType.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the attribute type isn't used as a required or optional
    // attribute type in any objectclass.
    for (ObjectClass oc : schema.getObjectClasses().values())
    {
      if (oc.getRequiredAttributes().contains(removeType) ||
          oc.getOptionalAttributes().contains(removeType))
      {
        Message message = ERR_SCHEMA_MODIFY_REMOVE_AT_IN_OC.get(
            removeType.getNameOrOID(), oc.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the attribute type isn't used as a required or optional
    // attribute type in any name form.
    for (NameForm nf : schema.getNameFormsByObjectClass().values())
    {
      if (nf.getRequiredAttributes().contains(removeType) ||
          nf.getOptionalAttributes().contains(removeType))
      {
        Message message = ERR_SCHEMA_MODIFY_REMOVE_AT_IN_NF.get(
            removeType.getNameOrOID(), nf.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the attribute type isn't used as a required, optional, or
    // prohibited attribute type in any DIT content rule.
    for (DITContentRule dcr : schema.getDITContentRules().values())
    {
      if (dcr.getRequiredAttributes().contains(removeType) ||
          dcr.getOptionalAttributes().contains(removeType) ||
          dcr.getProhibitedAttributes().contains(removeType))
      {
        Message message = ERR_SCHEMA_MODIFY_REMOVE_AT_IN_DCR.get(
            removeType.getNameOrOID(), dcr.getName());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the attribute type isn't referenced by any matching rule
    // use.
    for (MatchingRuleUse mru : schema.getMatchingRuleUses().values())
    {
      if (mru.getAttributes().contains(removeType))
      {
        Message message = ERR_SCHEMA_MODIFY_REMOVE_AT_IN_MR_USE.get(
            removeType.getNameOrOID(), mru.getName());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // If we've gotten here, then it's OK to remove the attribute type from
    // the schema.
    schema.deregisterAttributeType(removeType);
    String schemaFile = removeType.getSchemaFile();
    if (schemaFile != null)
    {
      modifiedSchemaFiles.add(schemaFile);
    }
  }



  /**
   * Handles all processing required for adding the provided objectclass to the
   * given schema, replacing an existing class if necessary, and ensuring
   * all other metadata is properly updated.
   *
   * @param  objectClass          The objectclass to add or replace in the
   *                              server schema.
   * @param  schema               The schema to which the objectclass should be
   *                              added.
   * @param  modifiedSchemaFiles  The names of the schema files containing
   *                              schema elements that have been updated as part
   *                              of the schema modification.
   *
   * @throws  DirectoryException  If a problem occurs while attempting to add
   *                              the provided objectclass to the server schema.
   */
  private void addObjectClass(ObjectClass objectClass, Schema schema,
                              Set<String> modifiedSchemaFiles)
          throws DirectoryException
  {
    // First, see if the specified objectclass already exists.  We'll check the
    // OID and all of the names, which means that it's possible there could be
    // more than one match (although if there is, then we'll refuse the
    // operation).
    ObjectClass existingClass =
         schema.getObjectClass(objectClass.getOID());
    for (String name : objectClass.getNormalizedNames())
    {
      ObjectClass oc = schema.getObjectClass(name);
      if (oc == null)
      {
        continue;
      }
      else if (existingClass == null)
      {
        existingClass = oc;
      }
      else if (existingClass != oc)
      {
        // NOTE:  We really do want to use "!=" instead of "! t.equals()"
        // because we want to check whether it's the same object instance, not
        // just a logical equivalent.
        Message message =
            ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_OBJECTCLASS
                .get(objectClass.getNameOrOID(),
                     existingClass.getNameOrOID(),
                     oc.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
    }


    // Make sure that the new objectclass doesn't reference an undefined
    // superior class, or an undefined required or optional attribute type,
    // and that none of them are OBSOLETE.
    ObjectClass superiorClass = objectClass.getSuperiorClass();
    if (superiorClass != null)
    {
      if (! schema.hasObjectClass(superiorClass.getOID()))
      {
        Message message = ERR_SCHEMA_MODIFY_UNDEFINED_SUPERIOR_OBJECTCLASS.get(
            objectClass.getNameOrOID(), superiorClass.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
      else if (superiorClass.isObsolete())
      {
        Message message = ERR_SCHEMA_MODIFY_OBSOLETE_SUPERIOR_OBJECTCLASS.get(
            objectClass.getNameOrOID(), superiorClass.getNameOrOID());
        throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
      }
    }

    for (AttributeType at : objectClass.getRequiredAttributes())
    {
      if (! schema.hasAttributeType(at.getOID()))
      {
        Message message = ERR_SCHEMA_MODIFY_OC_UNDEFINED_REQUIRED_ATTR.get(
            objectClass.getNameOrOID(), at.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
      else if (at.isObsolete())
      {
        Message message = ERR_SCHEMA_MODIFY_OC_OBSOLETE_REQUIRED_ATTR.get(
            objectClass.getNameOrOID(), at.getNameOrOID());
        throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
      }
    }

    for (AttributeType at : objectClass.getOptionalAttributes())
    {
      if (! schema.hasAttributeType(at.getOID()))
      {
        Message message = ERR_SCHEMA_MODIFY_OC_UNDEFINED_OPTIONAL_ATTR.get(
            objectClass.getNameOrOID(), at.getNameOrOID());
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
      }
      else if (at.isObsolete())
      {
        Message message = ERR_SCHEMA_MODIFY_OC_OBSOLETE_OPTIONAL_ATTR.get(
            objectClass.getNameOrOID(), at.getNameOrOID());
        throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message);
      }
    }


    // If there is no existing class, then we're adding a new objectclass.
    // Otherwise, we're replacing an existing one.
    if (existingClass == null)
    {
      schema.registerObjectClass(objectClass, false);
      String schemaFile = objectClass.getSchemaFile();
      if ((schemaFile == null) || (schemaFile.length() == 0))
      {
        schemaFile = FILE_USER_SCHEMA_ELEMENTS;
        objectClass.setSchemaFile(schemaFile);
      }

      modifiedSchemaFiles.add(schemaFile);
    }
    else
    {
      schema.deregisterObjectClass(existingClass);
      schema.registerObjectClass(objectClass, false);
      schema.rebuildDependentElements(existingClass);

      if ((objectClass.getSchemaFile() == null) ||
          (objectClass.getSchemaFile().length() == 0))
      {
        String schemaFile = existingClass.getSchemaFile();
        if ((schemaFile == null) || (schemaFile.length() == 0))
        {
          schemaFile = FILE_USER_SCHEMA_ELEMENTS;
        }

        objectClass.setSchemaFile(schemaFile);
        modifiedSchemaFiles.add(schemaFile);
      }
      else
      {
        String newSchemaFile = objectClass.getSchemaFile();
        String oldSchemaFile = existingClass.getSchemaFile();
        if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile))
        {
          modifiedSchemaFiles.add(newSchemaFile);
        }
        else
        {
          modifiedSchemaFiles.add(newSchemaFile);
          modifiedSchemaFiles.add(oldSchemaFile);
        }
      }
    }
  }



  /**
   * Handles all processing required to remove the provided objectclass from the
   * server schema, ensuring all other metadata is properly updated.  Note that
   * this method will first check to see whether the same objectclass will be
   * later added to the server schema with an updated definition, and if so then
   * the removal will be ignored because the later add will be handled as a
   * replace.  If the objectclass will not be replaced with a new definition,
   * then this method will ensure that there are no other schema elements that
   * depend on the objectclass before allowing it to be removed.
   *
   * @param  objectClass          The objectclass to remove from the server
   *                              schema.
   * @param  schema               The schema from which the objectclass should
   *                              be removed.
   * @param  modifications        The full set of modifications to be processed
   *                              against the server schema.
2066 * @param currentPosition The position of the modification currently 2067 * being performed. 2068 * @param modifiedSchemaFiles The names of the schema files containing 2069 * schema elements that have been updated as part 2070 * of the schema modification. 2071 * 2072 * @throws DirectoryException If a problem occurs while attempting to remove 2073 * the provided objectclass from the server 2074 * schema. 2075 */ 2076 private void removeObjectClass(ObjectClass objectClass, Schema schema, 2077 ArrayList<Modification> modifications, 2078 int currentPosition, 2079 Set<String> modifiedSchemaFiles) 2080 throws DirectoryException 2081 { 2082 // See if the specified objectclass is actually defined in the server 2083 // schema. If not, then fail. 2084 ObjectClass removeClass = schema.getObjectClass(objectClass.getOID()); 2085 if ((removeClass == null) || (! removeClass.equals(objectClass))) 2086 { 2087 Message message = ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_OBJECTCLASS.get( 2088 objectClass.getNameOrOID()); 2089 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2090 } 2091 2092 2093 // See if there is another modification later to add the objectclass back 2094 // into the schema. If so, then it's a replace and we should ignore the 2095 // remove because adding it back will handle the replace. 2096 for (int i=currentPosition+1; i < modifications.size(); i++) 2097 { 2098 Modification m = modifications.get(i); 2099 Attribute a = m.getAttribute(); 2100 2101 if ((m.getModificationType() != ModificationType.ADD) || 2102 (! a.getAttributeType().equals(objectClassesType))) 2103 { 2104 continue; 2105 } 2106 2107 for (AttributeValue v : a.getValues()) 2108 { 2109 ObjectClass oc; 2110 try 2111 { 2112 oc = ObjectClassSyntax.decodeObjectClass(v.getValue(), schema, true); 2113 } 2114 catch (DirectoryException de) 2115 { 2116 if (debugEnabled()) 2117 { 2118 TRACER.debugCaught(DebugLogLevel.ERROR, de); 2119 } 2120 2121 Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_OBJECTCLASS.get( 2122 v.getStringValue(), de.getMessageObject()); 2123 throw new DirectoryException( 2124 ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, 2125 de); 2126 } 2127 2128 if (objectClass.getOID().equals(oc.getOID())) 2129 { 2130 // We found a match where the objectClass is added back later, so we 2131 // don't need to do anything else here. 2132 return; 2133 } 2134 } 2135 } 2136 2137 2138 // Make sure that the objectclass isn't used as the superior class for any 2139 // other objectclass. 2140 for (ObjectClass oc : schema.getObjectClasses().values()) 2141 { 2142 ObjectClass superiorClass = oc.getSuperiorClass(); 2143 if ((superiorClass != null) && superiorClass.equals(removeClass)) 2144 { 2145 Message message = ERR_SCHEMA_MODIFY_REMOVE_OC_SUPERIOR_CLASS.get( 2146 removeClass.getNameOrOID(), superiorClass.getNameOrOID()); 2147 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2148 } 2149 } 2150 2151 2152 // Make sure that the objectclass isn't used as the structural class for 2153 // any name form. 2154 NameForm nf = schema.getNameForm(removeClass); 2155 if (nf != null) 2156 { 2157 Message message = ERR_SCHEMA_MODIFY_REMOVE_OC_IN_NF.get( 2158 removeClass.getNameOrOID(), nf.getNameOrOID()); 2159 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2160 } 2161 2162 2163 // Make sure that the objectclass isn't used as a structural or auxiliary 2164 // class for any DIT content rule. 
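/*
 * For illustration only (hypothetical definition): a DIT content rule is keyed
 * by the OID of its structural class and may list auxiliary classes in an AUX
 * clause, for example
 *
 *   ( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPersonContent'
 *     AUX ( strongAuthenticationUser ) )
 *
 * so removing the structural class or any listed auxiliary class would orphan
 * the rule, which is what the loop below prevents.
 */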
2165 for (DITContentRule dcr : schema.getDITContentRules().values()) 2166 { 2167 if (dcr.getStructuralClass().equals(removeClass) || 2168 dcr.getAuxiliaryClasses().contains(removeClass)) 2169 { 2170 Message message = ERR_SCHEMA_MODIFY_REMOVE_OC_IN_DCR.get( 2171 removeClass.getNameOrOID(), dcr.getName()); 2172 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2173 } 2174 } 2175 2176 2177 // If we've gotten here, then it's OK to remove the objectclass from the 2178 // schema. 2179 schema.deregisterObjectClass(removeClass); 2180 String schemaFile = removeClass.getSchemaFile(); 2181 if (schemaFile != null) 2182 { 2183 modifiedSchemaFiles.add(schemaFile); 2184 } 2185 } 2186 2187 2188 2189 /** 2190 * Handles all processing required for adding the provided name form to the 2191 * the given schema, replacing an existing name form if necessary, and 2192 * ensuring all other metadata is properly updated. 2193 * 2194 * @param nameForm The name form to add or replace in the server 2195 * schema. 2196 * @param schema The schema to which the name form should be 2197 * added. 2198 * @param modifiedSchemaFiles The names of the schema files containing 2199 * schema elements that have been updated as part 2200 * of the schema modification. 2201 * 2202 * @throws DirectoryException If a problem occurs while attempting to add 2203 * the provided name form to the server schema. 2204 */ 2205 private void addNameForm(NameForm nameForm, Schema schema, 2206 Set<String> modifiedSchemaFiles) 2207 throws DirectoryException 2208 { 2209 // First, see if the specified name form already exists. We'll check the 2210 // OID and all of the names, which means that it's possible there could be 2211 // more than one match (although if there is, then we'll refuse the 2212 // operation). 2213 NameForm existingNF = 2214 schema.getNameForm(nameForm.getOID()); 2215 for (String name : nameForm.getNames().keySet()) 2216 { 2217 NameForm nf = schema.getNameForm(name); 2218 if (nf == null) 2219 { 2220 continue; 2221 } 2222 else if (existingNF == null) 2223 { 2224 existingNF = nf; 2225 } 2226 else if (existingNF != nf) 2227 { 2228 // NOTE: We really do want to use "!=" instead of "! t.equals()" 2229 // because we want to check whether it's the same object instance, not 2230 // just a logical equivalent. 2231 Message message = 2232 ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_NAME_FORM 2233 .get(nameForm.getNameOrOID(), existingNF.getNameOrOID(), 2234 nf.getNameOrOID()); 2235 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2236 } 2237 } 2238 2239 2240 // Make sure that the new name form doesn't reference an undefined 2241 // structural class, or an undefined required or optional attribute type, or 2242 // that any of them are marked OBSOLETE. 2243 ObjectClass structuralClass = nameForm.getStructuralClass(); 2244 if (! 
schema.hasObjectClass(structuralClass.getOID())) 2245 { 2246 Message message = ERR_SCHEMA_MODIFY_NF_UNDEFINED_STRUCTURAL_OC.get( 2247 nameForm.getNameOrOID(), structuralClass.getNameOrOID()); 2248 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2249 } 2250 if (structuralClass.getObjectClassType() != ObjectClassType.STRUCTURAL) 2251 { 2252 Message message = ERR_SCHEMA_MODIFY_NF_OC_NOT_STRUCTURAL.get( 2253 nameForm.getNameOrOID(), structuralClass.getNameOrOID()); 2254 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2255 } 2256 if (structuralClass.isObsolete()) 2257 { 2258 Message message = ERR_SCHEMA_MODIFY_NF_OC_OBSOLETE.get( 2259 nameForm.getNameOrOID(), structuralClass.getNameOrOID()); 2260 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2261 } 2262 2263 NameForm existingNFForClass = schema.getNameForm(structuralClass); 2264 if ((existingNFForClass != null) && (existingNFForClass != existingNF)) 2265 { 2266 Message message = ERR_SCHEMA_MODIFY_STRUCTURAL_OC_CONFLICT_FOR_ADD_NF. 2267 get(nameForm.getNameOrOID(), structuralClass.getNameOrOID(), 2268 existingNFForClass.getNameOrOID()); 2269 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2270 } 2271 2272 for (AttributeType at : nameForm.getRequiredAttributes()) 2273 { 2274 if (! schema.hasAttributeType(at.getOID())) 2275 { 2276 Message message = ERR_SCHEMA_MODIFY_NF_UNDEFINED_REQUIRED_ATTR.get( 2277 nameForm.getNameOrOID(), at.getNameOrOID()); 2278 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2279 } 2280 else if (at.isObsolete()) 2281 { 2282 Message message = ERR_SCHEMA_MODIFY_NF_OBSOLETE_REQUIRED_ATTR.get( 2283 nameForm.getNameOrOID(), at.getNameOrOID()); 2284 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2285 } 2286 } 2287 2288 for (AttributeType at : nameForm.getOptionalAttributes()) 2289 { 2290 if (! schema.hasAttributeType(at.getOID())) 2291 { 2292 Message message = ERR_SCHEMA_MODIFY_NF_UNDEFINED_OPTIONAL_ATTR.get( 2293 nameForm.getNameOrOID(), at.getNameOrOID()); 2294 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2295 } 2296 else if (at.isObsolete()) 2297 { 2298 Message message = ERR_SCHEMA_MODIFY_NF_OBSOLETE_OPTIONAL_ATTR.get( 2299 nameForm.getNameOrOID(), at.getNameOrOID()); 2300 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2301 } 2302 } 2303 2304 2305 // If there is no existing class, then we're adding a new name form. 2306 // Otherwise, we're replacing an existing one. 
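/*
 * For illustration only (hypothetical OID and file name, and assuming the
 * usual OpenDS convention): the schema file recorded below generally
 * corresponds to an X-SCHEMA-FILE extension on the definition, for example
 *
 *   ( 1.3.6.1.4.1.99999.1.1 NAME 'exampleNameForm' OC person MUST cn
 *     X-SCHEMA-FILE '98-example.ldif' )
 *
 * If the new definition carries no file association, it is written to
 * FILE_USER_SCHEMA_ELEMENTS instead.
 */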
2307 if (existingNF == null) 2308 { 2309 schema.registerNameForm(nameForm, false); 2310 String schemaFile = nameForm.getSchemaFile(); 2311 if ((schemaFile == null) || (schemaFile.length() == 0)) 2312 { 2313 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2314 nameForm.setSchemaFile(schemaFile); 2315 } 2316 2317 modifiedSchemaFiles.add(schemaFile); 2318 } 2319 else 2320 { 2321 schema.deregisterNameForm(existingNF); 2322 schema.registerNameForm(nameForm, false); 2323 schema.rebuildDependentElements(existingNF); 2324 2325 if ((nameForm.getSchemaFile() == null) || 2326 (nameForm.getSchemaFile().length() == 0)) 2327 { 2328 String schemaFile = existingNF.getSchemaFile(); 2329 if ((schemaFile == null) || (schemaFile.length() == 0)) 2330 { 2331 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2332 } 2333 2334 nameForm.setSchemaFile(schemaFile); 2335 modifiedSchemaFiles.add(schemaFile); 2336 } 2337 else 2338 { 2339 String newSchemaFile = nameForm.getSchemaFile(); 2340 String oldSchemaFile = existingNF.getSchemaFile(); 2341 if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile)) 2342 { 2343 modifiedSchemaFiles.add(newSchemaFile); 2344 } 2345 else 2346 { 2347 modifiedSchemaFiles.add(newSchemaFile); 2348 modifiedSchemaFiles.add(oldSchemaFile); 2349 } 2350 } 2351 } 2352 } 2353 2354 2355 2356 /** 2357 * Handles all processing required to remove the provided name form from the 2358 * server schema, ensuring all other metadata is properly updated. Note that 2359 * this method will first check to see whether the same name form will be 2360 * later added to the server schema with an updated definition, and if so then 2361 * the removal will be ignored because the later add will be handled as a 2362 * replace. If the name form will not be replaced with a new definition, then 2363 * this method will ensure that there are no other schema elements that depend 2364 * on the name form before allowing it to be removed. 2365 * 2366 * @param nameForm The name form to remove from the server 2367 * schema. 2368 * @param schema The schema from which the name form should be 2369 * be removed. 2370 * @param modifications The full set of modifications to be processed 2371 * against the server schema. 2372 * @param currentPosition The position of the modification currently 2373 * being performed. 2374 * @param modifiedSchemaFiles The names of the schema files containing 2375 * schema elements that have been updated as part 2376 * of the schema modification. 2377 * 2378 * @throws DirectoryException If a problem occurs while attempting to remove 2379 * the provided name form from the server schema. 2380 */ 2381 private void removeNameForm(NameForm nameForm, Schema schema, 2382 ArrayList<Modification> modifications, 2383 int currentPosition, 2384 Set<String> modifiedSchemaFiles) 2385 throws DirectoryException 2386 { 2387 // See if the specified name form is actually defined in the server schema. 2388 // If not, then fail. 2389 NameForm removeNF = schema.getNameForm(nameForm.getOID()); 2390 if ((removeNF == null) || (! removeNF.equals(nameForm))) 2391 { 2392 Message message = ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_NAME_FORM.get( 2393 nameForm.getNameOrOID()); 2394 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2395 } 2396 2397 2398 // See if there is another modification later to add the name form back 2399 // into the schema. If so, then it's a replace and we should ignore the 2400 // remove because adding it back will handle the replace. 
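/*
 * For illustration only (hypothetical OID, values abbreviated): the
 * delete-then-add sequence this loop looks for is what a client produces with
 * an LDIF change such as
 *
 *   dn: cn=schema
 *   changetype: modify
 *   delete: nameForms
 *   nameForms: ( 1.3.6.1.4.1.99999.1.1 NAME 'exampleNameForm' OC person MUST cn )
 *   -
 *   add: nameForms
 *   nameForms: ( 1.3.6.1.4.1.99999.1.1 NAME 'exampleNameForm' OC person MUST ( cn $ uid ) )
 *
 * In that case the delete half is skipped here and the later add is treated as
 * a replace.
 */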
2401 for (int i=currentPosition+1; i < modifications.size(); i++) 2402 { 2403 Modification m = modifications.get(i); 2404 Attribute a = m.getAttribute(); 2405 2406 if ((m.getModificationType() != ModificationType.ADD) || 2407 (! a.getAttributeType().equals(nameFormsType))) 2408 { 2409 continue; 2410 } 2411 2412 for (AttributeValue v : a.getValues()) 2413 { 2414 NameForm nf; 2415 try 2416 { 2417 nf = NameFormSyntax.decodeNameForm(v.getValue(), schema, true); 2418 } 2419 catch (DirectoryException de) 2420 { 2421 if (debugEnabled()) 2422 { 2423 TRACER.debugCaught(DebugLogLevel.ERROR, de); 2424 } 2425 2426 Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_NAME_FORM.get( 2427 v.getStringValue(), de.getMessageObject()); 2428 throw new DirectoryException( 2429 ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, 2430 de); 2431 } 2432 2433 if (nameForm.getOID().equals(nf.getOID())) 2434 { 2435 // We found a match where the name form is added back later, so we 2436 // don't need to do anything else here. 2437 return; 2438 } 2439 } 2440 } 2441 2442 2443 // Make sure that the name form isn't referenced by any DIT structure 2444 // rule. 2445 DITStructureRule dsr = schema.getDITStructureRule(removeNF); 2446 if (dsr != null) 2447 { 2448 Message message = ERR_SCHEMA_MODIFY_REMOVE_NF_IN_DSR.get( 2449 removeNF.getNameOrOID(), dsr.getNameOrRuleID()); 2450 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2451 } 2452 2453 2454 // If we've gotten here, then it's OK to remove the name form from the 2455 // schema. 2456 schema.deregisterNameForm(removeNF); 2457 String schemaFile = removeNF.getSchemaFile(); 2458 if (schemaFile != null) 2459 { 2460 modifiedSchemaFiles.add(schemaFile); 2461 } 2462 } 2463 2464 2465 2466 /** 2467 * Handles all processing required for adding the provided DIT content rule to 2468 * the given schema, replacing an existing rule if necessary, and ensuring 2469 * all other metadata is properly updated. 2470 * 2471 * @param ditContentRule The DIT content rule to add or replace in the 2472 * server schema. 2473 * @param schema The schema to which the DIT content rule 2474 * should be be added. 2475 * @param modifiedSchemaFiles The names of the schema files containing 2476 * schema elements that have been updated as part 2477 * of the schema modification. 2478 * 2479 * @throws DirectoryException If a problem occurs while attempting to add 2480 * the provided DIT content rule to the server 2481 * schema. 2482 */ 2483 private void addDITContentRule(DITContentRule ditContentRule, Schema schema, 2484 Set<String> modifiedSchemaFiles) 2485 throws DirectoryException 2486 { 2487 // First, see if the specified DIT content rule already exists. We'll check 2488 // all of the names, which means that it's possible there could be more than 2489 // one match (although if there is, then we'll refuse the operation). 2490 DITContentRule existingDCR = null; 2491 for (DITContentRule dcr : schema.getDITContentRules().values()) 2492 { 2493 for (String name : ditContentRule.getNames().keySet()) 2494 { 2495 if (dcr.hasName(name)) 2496 { 2497 if (existingDCR == null) 2498 { 2499 existingDCR = dcr; 2500 break; 2501 } 2502 else 2503 { 2504 Message message = ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_DCR. 
2505 get(ditContentRule.getName(), existingDCR.getName(), 2506 dcr.getName()); 2507 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, 2508 message); 2509 } 2510 } 2511 } 2512 } 2513 2514 2515 // Get the structural class for the new DIT content rule and see if there's 2516 // already an existing rule that is associated with that class. If there 2517 // is, then it will only be acceptable if it's the DIT content rule that we 2518 // are replacing (in which case we really do want to use the "!=" operator). 2519 ObjectClass structuralClass = ditContentRule.getStructuralClass(); 2520 DITContentRule existingRuleForClass = 2521 schema.getDITContentRule(structuralClass); 2522 if ((existingRuleForClass != null) && (existingRuleForClass != existingDCR)) 2523 { 2524 Message message = ERR_SCHEMA_MODIFY_STRUCTURAL_OC_CONFLICT_FOR_ADD_DCR. 2525 get(ditContentRule.getName(), structuralClass.getNameOrOID(), 2526 existingRuleForClass.getName()); 2527 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2528 } 2529 2530 2531 // Make sure that the new DIT content rule doesn't reference an undefined 2532 // structural or auxiliaryclass, or an undefined required, optional, or 2533 // prohibited attribute type. 2534 if (! schema.hasObjectClass(structuralClass.getOID())) 2535 { 2536 Message message = ERR_SCHEMA_MODIFY_DCR_UNDEFINED_STRUCTURAL_OC.get( 2537 ditContentRule.getName(), structuralClass.getNameOrOID()); 2538 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2539 } 2540 2541 if (structuralClass.getObjectClassType() != ObjectClassType.STRUCTURAL) 2542 { 2543 Message message = ERR_SCHEMA_MODIFY_DCR_OC_NOT_STRUCTURAL.get( 2544 ditContentRule.getName(), structuralClass.getNameOrOID()); 2545 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2546 } 2547 2548 if (structuralClass.isObsolete()) 2549 { 2550 Message message = ERR_SCHEMA_MODIFY_DCR_STRUCTURAL_OC_OBSOLETE.get( 2551 ditContentRule.getName(), structuralClass.getNameOrOID()); 2552 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2553 } 2554 2555 for (ObjectClass oc : ditContentRule.getAuxiliaryClasses()) 2556 { 2557 if (! schema.hasObjectClass(oc.getOID())) 2558 { 2559 Message message = ERR_SCHEMA_MODIFY_DCR_UNDEFINED_AUXILIARY_OC.get( 2560 ditContentRule.getName(), oc.getNameOrOID()); 2561 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2562 } 2563 if (oc.getObjectClassType() != ObjectClassType.AUXILIARY) 2564 { 2565 Message message = ERR_SCHEMA_MODIFY_DCR_OC_NOT_AUXILIARY.get( 2566 ditContentRule.getName(), oc.getNameOrOID()); 2567 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2568 } 2569 if (oc.isObsolete()) 2570 { 2571 Message message = ERR_SCHEMA_MODIFY_DCR_OBSOLETE_AUXILIARY_OC.get( 2572 ditContentRule.getName(), oc.getNameOrOID()); 2573 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2574 } 2575 } 2576 2577 for (AttributeType at : ditContentRule.getRequiredAttributes()) 2578 { 2579 if (! 
schema.hasAttributeType(at.getOID())) 2580 { 2581 Message message = ERR_SCHEMA_MODIFY_DCR_UNDEFINED_REQUIRED_ATTR.get( 2582 ditContentRule.getName(), at.getNameOrOID()); 2583 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2584 } 2585 else if (at.isObsolete()) 2586 { 2587 Message message = ERR_SCHEMA_MODIFY_DCR_OBSOLETE_REQUIRED_ATTR.get( 2588 ditContentRule.getName(), at.getNameOrOID()); 2589 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2590 } 2591 } 2592 2593 for (AttributeType at : ditContentRule.getOptionalAttributes()) 2594 { 2595 if (! schema.hasAttributeType(at.getOID())) 2596 { 2597 Message message = ERR_SCHEMA_MODIFY_DCR_UNDEFINED_OPTIONAL_ATTR.get( 2598 ditContentRule.getName(), at.getNameOrOID()); 2599 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2600 } 2601 else if (at.isObsolete()) 2602 { 2603 Message message = ERR_SCHEMA_MODIFY_DCR_OBSOLETE_OPTIONAL_ATTR.get( 2604 ditContentRule.getName(), at.getNameOrOID()); 2605 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2606 } 2607 } 2608 2609 for (AttributeType at : ditContentRule.getProhibitedAttributes()) 2610 { 2611 if (! schema.hasAttributeType(at.getOID())) 2612 { 2613 Message message = ERR_SCHEMA_MODIFY_DCR_UNDEFINED_PROHIBITED_ATTR.get( 2614 ditContentRule.getName(), at.getNameOrOID()); 2615 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2616 } 2617 else if (at.isObsolete()) 2618 { 2619 Message message = ERR_SCHEMA_MODIFY_DCR_OBSOLETE_PROHIBITED_ATTR.get( 2620 ditContentRule.getName(), at.getNameOrOID()); 2621 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2622 } 2623 } 2624 2625 2626 // If there is no existing rule, then we're adding a new DIT content rule. 2627 // Otherwise, we're replacing an existing one. 2628 if (existingDCR == null) 2629 { 2630 schema.registerDITContentRule(ditContentRule, false); 2631 String schemaFile = ditContentRule.getSchemaFile(); 2632 if ((schemaFile == null) || (schemaFile.length() == 0)) 2633 { 2634 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2635 ditContentRule.setSchemaFile(schemaFile); 2636 } 2637 2638 modifiedSchemaFiles.add(schemaFile); 2639 } 2640 else 2641 { 2642 schema.deregisterDITContentRule(existingDCR); 2643 schema.registerDITContentRule(ditContentRule, false); 2644 schema.rebuildDependentElements(existingDCR); 2645 2646 if ((ditContentRule.getSchemaFile() == null) || 2647 (ditContentRule.getSchemaFile().length() == 0)) 2648 { 2649 String schemaFile = existingDCR.getSchemaFile(); 2650 if ((schemaFile == null) || (schemaFile.length() == 0)) 2651 { 2652 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2653 } 2654 2655 ditContentRule.setSchemaFile(schemaFile); 2656 modifiedSchemaFiles.add(schemaFile); 2657 } 2658 else 2659 { 2660 String newSchemaFile = ditContentRule.getSchemaFile(); 2661 String oldSchemaFile = existingDCR.getSchemaFile(); 2662 if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile)) 2663 { 2664 modifiedSchemaFiles.add(newSchemaFile); 2665 } 2666 else 2667 { 2668 modifiedSchemaFiles.add(newSchemaFile); 2669 modifiedSchemaFiles.add(oldSchemaFile); 2670 } 2671 } 2672 } 2673 } 2674 2675 2676 2677 /** 2678 * Handles all processing required to remove the provided DIT content rule 2679 * from the server schema, ensuring all other metadata is properly updated. 
2680 * Note that this method will first check to see whether the same rule will be 2681 * later added to the server schema with an updated definition, and if so then 2682 * the removal will be ignored because the later add will be handled as a 2683 * replace. If the DIT content rule will not be replaced with a new 2684 * definition, then this method will ensure that there are no other schema 2685 * elements that depend on the rule before allowing it to be removed. 2686 * 2687 * @param ditContentRule The DIT content rule to remove from the server 2688 * schema. 2689 * @param schema The schema from which the DIT content rule 2690 * should be removed. 2691 * @param modifications The full set of modifications to be processed 2692 * against the server schema. 2693 * @param currentPosition The position of the modification currently 2694 * being performed. 2695 * @param modifiedSchemaFiles The names of the schema files containing 2696 * schema elements that have been updated as part 2697 * of the schema modification. 2698 * 2699 * @throws DirectoryException If a problem occurs while attempting to remove 2700 * the provided DIT content rule from the server 2701 * schema. 2702 */ 2703 private void removeDITContentRule(DITContentRule ditContentRule, 2704 Schema schema, 2705 ArrayList<Modification> modifications, 2706 int currentPosition, 2707 Set<String> modifiedSchemaFiles) 2708 throws DirectoryException 2709 { 2710 // See if the specified DIT content rule is actually defined in the server 2711 // schema. If not, then fail. 2712 DITContentRule removeDCR = 2713 schema.getDITContentRule(ditContentRule.getStructuralClass()); 2714 if ((removeDCR == null) || (! removeDCR.equals(ditContentRule))) 2715 { 2716 Message message = 2717 ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_DCR.get(ditContentRule.getName()); 2718 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2719 } 2720 2721 2722 // Since DIT content rules don't have any dependencies, then we don't need 2723 // to worry about the difference between a remove or a replace. We can 2724 // just remove the DIT content rule now, and if it is added back later then 2725 // there still won't be any conflict. 2726 schema.deregisterDITContentRule(removeDCR); 2727 String schemaFile = removeDCR.getSchemaFile(); 2728 if (schemaFile != null) 2729 { 2730 modifiedSchemaFiles.add(schemaFile); 2731 } 2732 } 2733 2734 2735 2736 /** 2737 * Handles all processing required for adding the provided DIT structure rule 2738 * to the given schema, replacing an existing rule if necessary, and ensuring 2739 * all other metadata is properly updated. 2740 * 2741 * @param ditStructureRule The DIT structure rule to add or replace in 2742 * the server schema. 2743 * @param schema The schema to which the DIT structure rule 2744 * should be be added. 2745 * @param modifiedSchemaFiles The names of the schema files containing 2746 * schema elements that have been updated as part 2747 * of the schema modification. 2748 * 2749 * @throws DirectoryException If a problem occurs while attempting to add 2750 * the provided DIT structure rule to the server 2751 * schema. 2752 */ 2753 private void addDITStructureRule(DITStructureRule ditStructureRule, 2754 Schema schema, 2755 Set<String> modifiedSchemaFiles) 2756 throws DirectoryException 2757 { 2758 // First, see if the specified DIT structure rule already exists. 
We'll 2759 // check the rule ID and all of the names, which means that it's possible 2760 // there could be more than one match (although if there is, then we'll 2761 // refuse the operation). 2762 DITStructureRule existingDSR = 2763 schema.getDITStructureRule(ditStructureRule.getRuleID()); 2764 for (DITStructureRule dsr : schema.getDITStructureRulesByID().values()) 2765 { 2766 for (String name : ditStructureRule.getNames().keySet()) 2767 { 2768 if (dsr.hasName(name)) 2769 { 2770 // We really do want to use the "!=" operator here because it's 2771 // acceptable if we find match for the same object instance. 2772 if ((existingDSR != null) && (existingDSR != dsr)) 2773 { 2774 Message message = ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_DSR. 2775 get(ditStructureRule.getNameOrRuleID(), 2776 existingDSR.getNameOrRuleID(), dsr.getNameOrRuleID()); 2777 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, 2778 message); 2779 } 2780 } 2781 } 2782 } 2783 2784 2785 // Get the name form for the new DIT structure rule and see if there's 2786 // already an existing rule that is associated with that name form. If 2787 // there is, then it will only be acceptable if it's the DIT structure rule 2788 // that we are replacing (in which case we really do want to use the "!=" 2789 // operator). 2790 NameForm nameForm = ditStructureRule.getNameForm(); 2791 DITStructureRule existingRuleForNameForm = 2792 schema.getDITStructureRule(nameForm); 2793 if ((existingRuleForNameForm != null) && 2794 (existingRuleForNameForm != existingDSR)) 2795 { 2796 Message message = ERR_SCHEMA_MODIFY_NAME_FORM_CONFLICT_FOR_ADD_DSR. 2797 get(ditStructureRule.getNameOrRuleID(), nameForm.getNameOrOID(), 2798 existingRuleForNameForm.getNameOrRuleID()); 2799 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2800 } 2801 2802 2803 // Make sure that the new DIT structure rule doesn't reference an undefined 2804 // name form or superior DIT structure rule. 2805 if (! schema.hasNameForm(nameForm.getOID())) 2806 { 2807 Message message = ERR_SCHEMA_MODIFY_DSR_UNDEFINED_NAME_FORM.get( 2808 ditStructureRule.getNameOrRuleID(), nameForm.getNameOrOID()); 2809 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2810 } 2811 if (nameForm.isObsolete()) 2812 { 2813 Message message = ERR_SCHEMA_MODIFY_DSR_OBSOLETE_NAME_FORM.get( 2814 ditStructureRule.getNameOrRuleID(), nameForm.getNameOrOID()); 2815 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2816 } 2817 2818 2819 // If there are any superior rules, then make sure none of them are marked 2820 // OBSOLETE. 2821 for (DITStructureRule dsr : ditStructureRule.getSuperiorRules()) 2822 { 2823 if (dsr.isObsolete()) 2824 { 2825 Message message = ERR_SCHEMA_MODIFY_DSR_OBSOLETE_SUPERIOR_RULE.get( 2826 ditStructureRule.getNameOrRuleID(), dsr.getNameOrRuleID()); 2827 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 2828 } 2829 } 2830 2831 2832 // If there is no existing rule, then we're adding a new DIT structure rule. 2833 // Otherwise, we're replacing an existing one. 
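/*
 * For illustration only (hypothetical rule IDs and names): DIT structure rules
 * are keyed by an integer rule ID and may name superior rules, for example
 *
 *   ( 2 NAME 'exampleChildRule' FORM exampleChildForm SUP 1 )
 *
 * Replacing an existing rule below re-registers the definition under the same
 * rule ID and marks the file(s) that held either version as modified.
 */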
2834 if (existingDSR == null) 2835 { 2836 schema.registerDITStructureRule(ditStructureRule, false); 2837 String schemaFile = ditStructureRule.getSchemaFile(); 2838 if ((schemaFile == null) || (schemaFile.length() == 0)) 2839 { 2840 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2841 ditStructureRule.setSchemaFile(schemaFile); 2842 } 2843 2844 modifiedSchemaFiles.add(schemaFile); 2845 } 2846 else 2847 { 2848 schema.deregisterDITStructureRule(existingDSR); 2849 schema.registerDITStructureRule(ditStructureRule, false); 2850 schema.rebuildDependentElements(existingDSR); 2851 2852 if ((ditStructureRule.getSchemaFile() == null) || 2853 (ditStructureRule.getSchemaFile().length() == 0)) 2854 { 2855 String schemaFile = existingDSR.getSchemaFile(); 2856 if ((schemaFile == null) || (schemaFile.length() == 0)) 2857 { 2858 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 2859 } 2860 2861 ditStructureRule.setSchemaFile(schemaFile); 2862 modifiedSchemaFiles.add(schemaFile); 2863 } 2864 else 2865 { 2866 String newSchemaFile = ditStructureRule.getSchemaFile(); 2867 String oldSchemaFile = existingDSR.getSchemaFile(); 2868 if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile)) 2869 { 2870 modifiedSchemaFiles.add(newSchemaFile); 2871 } 2872 else 2873 { 2874 modifiedSchemaFiles.add(newSchemaFile); 2875 modifiedSchemaFiles.add(oldSchemaFile); 2876 } 2877 } 2878 } 2879 } 2880 2881 2882 2883 /** 2884 * Handles all processing required to remove the provided DIT structure rule 2885 * from the server schema, ensuring all other metadata is properly updated. 2886 * Note that this method will first check to see whether the same rule will be 2887 * later added to the server schema with an updated definition, and if so then 2888 * the removal will be ignored because the later add will be handled as a 2889 * replace. If the DIT structure rule will not be replaced with a new 2890 * definition, then this method will ensure that there are no other schema 2891 * elements that depend on the rule before allowing it to be removed. 2892 * 2893 * @param ditStructureRule The DIT structure rule to remove from the 2894 * server schema. 2895 * @param schema The schema from which the DIT structure rule 2896 * should be removed. 2897 * @param modifications The full set of modifications to be processed 2898 * against the server schema. 2899 * @param currentPosition The position of the modification currently 2900 * being performed. 2901 * @param modifiedSchemaFiles The names of the schema files containing 2902 * schema elements that have been updated as part 2903 * of the schema modification. 2904 * 2905 * @throws DirectoryException If a problem occurs while attempting to remove 2906 * the provided DIT structure rule from the 2907 * server schema. 2908 */ 2909 private void removeDITStructureRule(DITStructureRule ditStructureRule, 2910 Schema schema, 2911 ArrayList<Modification> modifications, 2912 int currentPosition, 2913 Set<String> modifiedSchemaFiles) 2914 throws DirectoryException 2915 { 2916 // See if the specified DIT structure rule is actually defined in the server 2917 // schema. If not, then fail. 2918 DITStructureRule removeDSR = 2919 schema.getDITStructureRule(ditStructureRule.getRuleID()); 2920 if ((removeDSR == null) || (! 
removeDSR.equals(ditStructureRule))) 2921 { 2922 Message message = ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_DSR.get( 2923 ditStructureRule.getNameOrRuleID()); 2924 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2925 } 2926 2927 2928 // See if there is another modification later to add the DIT structure rule 2929 // back into the schema. If so, then it's a replace and we should ignore 2930 // the remove because adding it back will handle the replace. 2931 for (int i=currentPosition+1; i < modifications.size(); i++) 2932 { 2933 Modification m = modifications.get(i); 2934 Attribute a = m.getAttribute(); 2935 2936 if ((m.getModificationType() != ModificationType.ADD) || 2937 (! a.getAttributeType().equals(ditStructureRulesType))) 2938 { 2939 continue; 2940 } 2941 2942 for (AttributeValue v : a.getValues()) 2943 { 2944 DITStructureRule dsr; 2945 try 2946 { 2947 dsr = DITStructureRuleSyntax.decodeDITStructureRule( 2948 v.getValue(), schema, true); 2949 } 2950 catch (DirectoryException de) 2951 { 2952 if (debugEnabled()) 2953 { 2954 TRACER.debugCaught(DebugLogLevel.ERROR, de); 2955 } 2956 2957 Message message = ERR_SCHEMA_MODIFY_CANNOT_DECODE_DSR.get( 2958 v.getStringValue(), de.getMessageObject()); 2959 throw new DirectoryException( 2960 ResultCode.INVALID_ATTRIBUTE_SYNTAX, message, 2961 de); 2962 } 2963 2964 if (ditStructureRule.getRuleID() == dsr.getRuleID()) 2965 { 2966 // We found a match where the DIT structure rule is added back later, 2967 // so we don't need to do anything else here. 2968 return; 2969 } 2970 } 2971 } 2972 2973 2974 // Make sure that the DIT structure rule isn't the superior for any other 2975 // DIT structure rule. 2976 for (DITStructureRule dsr : schema.getDITStructureRulesByID().values()) 2977 { 2978 if (dsr.getSuperiorRules().contains(removeDSR)) 2979 { 2980 Message message = ERR_SCHEMA_MODIFY_REMOVE_DSR_SUPERIOR_RULE.get( 2981 removeDSR.getNameOrRuleID(), dsr.getNameOrRuleID()); 2982 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 2983 } 2984 } 2985 2986 2987 // If we've gotten here, then it's OK to remove the DIT structure rule from 2988 // the schema. 2989 schema.deregisterDITStructureRule(removeDSR); 2990 String schemaFile = removeDSR.getSchemaFile(); 2991 if (schemaFile != null) 2992 { 2993 modifiedSchemaFiles.add(schemaFile); 2994 } 2995 } 2996 2997 2998 2999 /** 3000 * Handles all processing required for adding the provided matching rule use 3001 * to the given schema, replacing an existing use if necessary, and ensuring 3002 * all other metadata is properly updated. 3003 * 3004 * @param matchingRuleUse The matching rule use to add or replace in the 3005 * server schema. 3006 * @param schema The schema to which the matching rule use 3007 * should be added. 3008 * @param modifiedSchemaFiles The names of the schema files containing 3009 * schema elements that have been updated as part 3010 * of the schema modification. 3011 * 3012 * @throws DirectoryException If a problem occurs while attempting to add 3013 * the provided matching rule use to the server 3014 * schema. 3015 */ 3016 private void addMatchingRuleUse(MatchingRuleUse matchingRuleUse, 3017 Schema schema, 3018 Set<String> modifiedSchemaFiles) 3019 throws DirectoryException 3020 { 3021 // First, see if the specified matching rule use already exists. We'll 3022 // check all of the names, which means that it's possible that there could 3023 // be more than one match (although if there is, then we'll refuse the 3024 // operation). 
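/*
 * For illustration only (hypothetical definition): a matching rule use shares
 * the OID of the matching rule it constrains and lists the attribute types it
 * applies to, for example
 *
 *   ( 2.5.13.5 NAME 'caseExactMatchUse' APPLIES ( description ) )
 *
 * At most one matching rule use may exist per matching rule, which is why the
 * checks below compare both the names and the underlying matching rule.
 */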
3025 MatchingRuleUse existingMRU = null; 3026 for (MatchingRuleUse mru : schema.getMatchingRuleUses().values()) 3027 { 3028 for (String name : matchingRuleUse.getNames().keySet()) 3029 { 3030 if (mru.hasName(name)) 3031 { 3032 if (existingMRU == null) 3033 { 3034 existingMRU = mru; 3035 break; 3036 } 3037 else 3038 { 3039 Message message = 3040 ERR_SCHEMA_MODIFY_MULTIPLE_CONFLICTS_FOR_ADD_MR_USE.get( 3041 matchingRuleUse.getName(), 3042 existingMRU.getName(), 3043 mru.getName()); 3044 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, 3045 message); 3046 } 3047 } 3048 } 3049 } 3050 3051 3052 // Get the matching rule for the new matching rule use and see if there's 3053 // already an existing matching rule use that is associated with that 3054 // matching rule. If there is, then it will only be acceptable if it's the 3055 // matching rule use that we are replacing (in which case we really do want 3056 // to use the "!=" operator). 3057 MatchingRule matchingRule = matchingRuleUse.getMatchingRule(); 3058 MatchingRuleUse existingMRUForRule = 3059 schema.getMatchingRuleUse(matchingRule); 3060 if ((existingMRUForRule != null) && (existingMRUForRule != existingMRU)) 3061 { 3062 Message message = ERR_SCHEMA_MODIFY_MR_CONFLICT_FOR_ADD_MR_USE. 3063 get(matchingRuleUse.getName(), matchingRule.getNameOrOID(), 3064 existingMRUForRule.getName()); 3065 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3066 } 3067 3068 if (matchingRule.isObsolete()) 3069 { 3070 Message message = ERR_SCHEMA_MODIFY_MRU_OBSOLETE_MR.get( 3071 matchingRuleUse.getName(), matchingRule.getNameOrOID()); 3072 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 3073 } 3074 3075 3076 // Make sure that the new matching rule use doesn't reference an undefined 3077 // attribute type. 3078 for (AttributeType at : matchingRuleUse.getAttributes()) 3079 { 3080 if (! schema.hasAttributeType(at.getOID())) 3081 { 3082 Message message = ERR_SCHEMA_MODIFY_MRU_UNDEFINED_ATTR.get( 3083 matchingRuleUse.getName(), at.getNameOrOID()); 3084 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3085 } 3086 else if (at.isObsolete()) 3087 { 3088 Message message = ERR_SCHEMA_MODIFY_MRU_OBSOLETE_ATTR.get( 3089 matchingRuleUse.getName(), at.getNameOrOID()); 3090 throw new DirectoryException(ResultCode.CONSTRAINT_VIOLATION, message); 3091 } 3092 } 3093 3094 3095 // If there is no existing matching rule use, then we're adding a new one. 3096 // Otherwise, we're replacing an existing matching rule use.
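/*
 * The bookkeeping below follows the same pattern used for attribute types,
 * objectclasses, name forms, DIT content rules, and DIT structure rules. As a
 * rough sketch (a hypothetical helper, not part of this class), the choice of
 * target schema file could be expressed as:
 *
 *   private static String chooseSchemaFile(String newFile, String oldFile)
 *   {
 *     if ((newFile != null) && (newFile.length() > 0))
 *     {
 *       return newFile;
 *     }
 *
 *     return ((oldFile != null) && (oldFile.length() > 0))
 *            ? oldFile : FILE_USER_SCHEMA_ELEMENTS;
 *   }
 */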
3097 if (existingMRU == null) 3098 { 3099 schema.registerMatchingRuleUse(matchingRuleUse, false); 3100 String schemaFile = matchingRuleUse.getSchemaFile(); 3101 if ((schemaFile == null) || (schemaFile.length() == 0)) 3102 { 3103 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 3104 matchingRuleUse.setSchemaFile(schemaFile); 3105 } 3106 3107 modifiedSchemaFiles.add(schemaFile); 3108 } 3109 else 3110 { 3111 schema.deregisterMatchingRuleUse(existingMRU); 3112 schema.registerMatchingRuleUse(matchingRuleUse, false); 3113 schema.rebuildDependentElements(existingMRU); 3114 3115 if ((matchingRuleUse.getSchemaFile() == null) || 3116 (matchingRuleUse.getSchemaFile().length() == 0)) 3117 { 3118 String schemaFile = existingMRU.getSchemaFile(); 3119 if ((schemaFile == null) || (schemaFile.length() == 0)) 3120 { 3121 schemaFile = FILE_USER_SCHEMA_ELEMENTS; 3122 } 3123 3124 matchingRuleUse.setSchemaFile(schemaFile); 3125 modifiedSchemaFiles.add(schemaFile); 3126 } 3127 else 3128 { 3129 String newSchemaFile = matchingRuleUse.getSchemaFile(); 3130 String oldSchemaFile = existingMRU.getSchemaFile(); 3131 if ((oldSchemaFile == null) || oldSchemaFile.equals(newSchemaFile)) 3132 { 3133 modifiedSchemaFiles.add(newSchemaFile); 3134 } 3135 else 3136 { 3137 modifiedSchemaFiles.add(newSchemaFile); 3138 modifiedSchemaFiles.add(oldSchemaFile); 3139 } 3140 } 3141 } 3142 } 3143 3144 3145 3146 /** 3147 * Handles all processing required to remove the provided matching rule use 3148 * from the server schema, ensuring all other metadata is properly updated. 3149 * Note that this method will first check to see whether the same matching 3150 * rule use will be later added to the server schema with an updated 3151 * definition, and if so then the removal will be ignored because the later 3152 * add will be handled as a replace. If the matching rule use will not be 3153 * replaced with a new definition, then this method will ensure that there are 3154 * no other schema elements that depend on the matching rule use before 3155 * allowing it to be removed. 3156 * 3157 * @param matchingRuleUse The matching rule use to remove from the 3158 * server schema. 3159 * @param schema The schema from which the matching rule use 3160 * should be removed. 3161 * @param modifications The full set of modifications to be processed 3162 * against the server schema. 3163 * @param currentPosition The position of the modification currently 3164 * being performed. 3165 * @param modifiedSchemaFiles The names of the schema files containing 3166 * schema elements that have been updated as part 3167 * of the schema modification. 3168 * 3169 * @throws DirectoryException If a problem occurs while attempting to remove 3170 * the provided matching rule use from the server 3171 * schema. 3172 */ 3173 private void removeMatchingRuleUse(MatchingRuleUse matchingRuleUse, 3174 Schema schema, 3175 ArrayList<Modification> modifications, 3176 int currentPosition, 3177 Set<String> modifiedSchemaFiles) 3178 throws DirectoryException 3179 { 3180 // See if the specified DIT content rule is actually defined in the server 3181 // schema. If not, then fail. 3182 MatchingRuleUse removeMRU = 3183 schema.getMatchingRuleUse(matchingRuleUse.getMatchingRule()); 3184 if ((removeMRU == null) || (! 
removeMRU.equals(matchingRuleUse))) 3185 { 3186 Message message = ERR_SCHEMA_MODIFY_REMOVE_NO_SUCH_MR_USE.get( 3187 matchingRuleUse.getName()); 3188 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3189 } 3190 3191 3192 // Since matching rule uses don't have any dependencies, then we don't need 3193 // to worry about the difference between a remove or a replace. We can 3194 // just remove the matching rule use now, and if it is added back later then 3195 // there still won't be any conflict. 3196 schema.deregisterMatchingRuleUse(removeMRU); 3197 String schemaFile = removeMRU.getSchemaFile(); 3198 if (schemaFile != null) 3199 { 3200 modifiedSchemaFiles.add(schemaFile); 3201 } 3202 } 3203 3204 3205 3206 /** 3207 * Creates an empty entry that may be used as the basis for a new schema file. 3208 * 3209 * @return An empty entry that may be used as the basis for a new schema 3210 * file. 3211 */ 3212 private Entry createEmptySchemaEntry() 3213 { 3214 LinkedHashMap<ObjectClass,String> objectClasses = 3215 new LinkedHashMap<ObjectClass,String>(); 3216 objectClasses.put(DirectoryServer.getTopObjectClass(), OC_TOP); 3217 objectClasses.put(DirectoryServer.getObjectClass(OC_LDAP_SUBENTRY_LC, true), 3218 OC_LDAP_SUBENTRY); 3219 objectClasses.put(DirectoryServer.getObjectClass(OC_SUBSCHEMA, true), 3220 OC_SUBSCHEMA); 3221 3222 LinkedHashMap<AttributeType,List<Attribute>> userAttributes = 3223 new LinkedHashMap<AttributeType,List<Attribute>>(); 3224 3225 LinkedHashMap<AttributeType,List<Attribute>> operationalAttributes = 3226 new LinkedHashMap<AttributeType,List<Attribute>>(); 3227 3228 DN dn = DirectoryServer.getSchemaDN(); 3229 RDN rdn = dn.getRDN(); 3230 for (int i=0; i < rdn.getNumValues(); i++) 3231 { 3232 AttributeType type = rdn.getAttributeType(i); 3233 String name = rdn.getAttributeName(i); 3234 3235 LinkedHashSet<AttributeValue> values = 3236 new LinkedHashSet<AttributeValue>(1); 3237 values.add(rdn.getAttributeValue(i)); 3238 3239 LinkedList<Attribute> attrList = new LinkedList<Attribute>(); 3240 attrList.add(new Attribute(type, name, values)); 3241 if (type.isOperational()) 3242 { 3243 operationalAttributes.put(type, attrList); 3244 } 3245 else 3246 { 3247 userAttributes.put(type, attrList); 3248 } 3249 } 3250 3251 return new Entry(dn, objectClasses, userAttributes, operationalAttributes); 3252 } 3253 3254 3255 3256 3257 /** 3258 * Writes a temporary version of the specified schema file. 3259 * 3260 * @param schema The schema from which to take the definitions to be 3261 * written. 3262 * @param schemaFile The name of the schema file to be written. 3263 * 3264 * @throws DirectoryException If an unexpected problem occurs while 3265 * identifying the schema definitions to include 3266 * in the schema file. 3267 * 3268 * @throws IOException If an unexpected error occurs while attempting to 3269 * write the temporary schema file. 3270 * 3271 * @throws LDIFException If an unexpected problem occurs while generating 3272 * the LDIF representation of the schema entry. 3273 */ 3274 private File writeTempSchemaFile(Schema schema, String schemaFile) 3275 throws DirectoryException, IOException, LDIFException 3276 { 3277 // Start with an empty schema entry. 3278 Entry schemaEntry = createEmptySchemaEntry(); 3279 3280 3281 // Add all of the appropriate attribute types to the schema entry. We need 3282 // to be careful of the ordering to ensure that any superior types in the 3283 // same file are written before the subordinate types.
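/*
 * For illustration only (hypothetical OIDs and names): if a single file
 * defined both
 *
 *   ( 1.3.6.1.4.1.99999.2.1 NAME 'exampleBaseAttr'
 *     EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )
 *   ( 1.3.6.1.4.1.99999.2.2 NAME 'exampleChildAttr' SUP exampleBaseAttr )
 *
 * then the recursion in addAttrTypeToSchemaFile() emits 'exampleBaseAttr'
 * first, so the regenerated file can be read back without forward references.
 */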
3284 HashSet<AttributeType> addedTypes = new HashSet<AttributeType>(); 3285 LinkedHashSet<AttributeValue> values = new LinkedHashSet<AttributeValue>(); 3286 for (AttributeType at : schema.getAttributeTypes().values()) 3287 { 3288 if (schemaFile.equals(at.getSchemaFile())) 3289 { 3290 addAttrTypeToSchemaFile(schema, schemaFile, at, values, addedTypes, 0); 3291 } 3292 } 3293 3294 if (! values.isEmpty()) 3295 { 3296 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3297 attrList.add(new Attribute(attributeTypesType, 3298 attributeTypesType.getPrimaryName(), values)); 3299 schemaEntry.putAttribute(attributeTypesType, attrList); 3300 } 3301 3302 3303 // Add all of the appropriate objectclasses to the schema entry. We need 3304 // to be careful of the ordering to ensure that any superior classes in the 3305 // same file are written before the subordinate classes. 3306 HashSet<ObjectClass> addedClasses = new HashSet<ObjectClass>(); 3307 values = new LinkedHashSet<AttributeValue>(); 3308 for (ObjectClass oc : schema.getObjectClasses().values()) 3309 { 3310 if (schemaFile.equals(oc.getSchemaFile())) 3311 { 3312 addObjectClassToSchemaFile(schema, schemaFile, oc, values, addedClasses, 3313 0); 3314 } 3315 } 3316 3317 if (! values.isEmpty()) 3318 { 3319 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3320 attrList.add(new Attribute(objectClassesType, 3321 objectClassesType.getPrimaryName(), values)); 3322 schemaEntry.putAttribute(objectClassesType, attrList); 3323 } 3324 3325 3326 // Add all of the appropriate name forms to the schema entry. Since there 3327 // is no hierarchical relationship between name forms, we don't need to 3328 // worry about ordering. 3329 values = new LinkedHashSet<AttributeValue>(); 3330 for (NameForm nf : schema.getNameFormsByObjectClass().values()) 3331 { 3332 if (schemaFile.equals(nf.getSchemaFile())) 3333 { 3334 values.add(new AttributeValue(nameFormsType, nf.getDefinition())); 3335 } 3336 } 3337 3338 if (! values.isEmpty()) 3339 { 3340 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3341 attrList.add(new Attribute(nameFormsType, 3342 nameFormsType.getPrimaryName(), values)); 3343 schemaEntry.putAttribute(nameFormsType, attrList); 3344 } 3345 3346 3347 // Add all of the appropriate DIT content rules to the schema entry. Since 3348 // there is no hierarchical relationship between DIT content rules, we don't 3349 // need to worry about ordering. 3350 values = new LinkedHashSet<AttributeValue>(); 3351 for (DITContentRule dcr : schema.getDITContentRules().values()) 3352 { 3353 if (schemaFile.equals(dcr.getSchemaFile())) 3354 { 3355 values.add(new AttributeValue(ditContentRulesType, 3356 dcr.getDefinition())); 3357 } 3358 } 3359 3360 if (! values.isEmpty()) 3361 { 3362 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3363 attrList.add(new Attribute(ditContentRulesType, 3364 ditContentRulesType.getPrimaryName(), values)); 3365 schemaEntry.putAttribute(ditContentRulesType, attrList); 3366 } 3367 3368 3369 // Add all of the appropriate DIT structure rules to the schema entry. We 3370 // need to be careful of the ordering to ensure that any superior rules in 3371 // the same file are written before the subordinate rules. 
3372 HashSet<DITStructureRule> addedDSRs = new HashSet<DITStructureRule>(); 3373 values = new LinkedHashSet<AttributeValue>(); 3374 for (DITStructureRule dsr : schema.getDITStructureRulesByID().values()) 3375 { 3376 if (schemaFile.equals(dsr.getSchemaFile())) 3377 { 3378 addDITStructureRuleToSchemaFile(schema, schemaFile, dsr, values, 3379 addedDSRs, 0); 3380 } 3381 } 3382 3383 if (! values.isEmpty()) 3384 { 3385 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3386 attrList.add(new Attribute(ditStructureRulesType, 3387 ditStructureRulesType.getPrimaryName(), 3388 values)); 3389 schemaEntry.putAttribute(ditStructureRulesType, attrList); 3390 } 3391 3392 3393 // Add all of the appropriate matching rule uses to the schema entry. Since 3394 // there is no hierarchical relationship between matching rule uses, we 3395 // don't need to worry about ordering. 3396 values = new LinkedHashSet<AttributeValue>(); 3397 for (MatchingRuleUse mru : schema.getMatchingRuleUses().values()) 3398 { 3399 if (schemaFile.equals(mru.getSchemaFile())) 3400 { 3401 values.add(new AttributeValue(matchingRuleUsesType, 3402 mru.getDefinition())); 3403 } 3404 } 3405 3406 if (! values.isEmpty()) 3407 { 3408 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3409 attrList.add(new Attribute(matchingRuleUsesType, 3410 matchingRuleUsesType.getPrimaryName(), 3411 values)); 3412 schemaEntry.putAttribute(matchingRuleUsesType, attrList); 3413 } 3414 3415 if (schemaFile.equals(FILE_USER_SCHEMA_ELEMENTS)) 3416 { 3417 Map<String, Attribute> attributes = schema.getExtraAttributes(); 3418 for (Attribute attribute : attributes.values()) 3419 { 3420 ArrayList<Attribute> attrList = new ArrayList<Attribute>(1); 3421 attrList.add(attribute); 3422 schemaEntry.putAttribute(attribute.getAttributeType(), attrList); 3423 } 3424 } 3425 3426 // Create a temporary file to which we can write the schema entry. 3427 File tempFile = File.createTempFile(schemaFile, "temp"); 3428 LDIFExportConfig exportConfig = 3429 new LDIFExportConfig(tempFile.getAbsolutePath(), 3430 ExistingFileBehavior.OVERWRITE); 3431 LDIFWriter ldifWriter = new LDIFWriter(exportConfig); 3432 ldifWriter.writeEntry(schemaEntry); 3433 ldifWriter.close(); 3434 3435 return tempFile; 3436 } 3437 3438 3439 3440 /** 3441 * Adds the definition for the specified attribute type to the provided set of 3442 * attribute values, recursively adding superior types as appropriate. 3443 * 3444 * @param schema The schema containing the attribute type. 3445 * @param schemaFile The schema file with which the attribute type is 3446 * associated. 3447 * @param attributeType The attribute type whose definition should be added 3448 * to the value set. 3449 * @param values The set of values for attribute type definitions 3450 * already added. 3451 * @param addedTypes The set of attribute types whose definitions have 3452 * already been added to the set of values. 3453 * @param depth A depth counter to use in an attempt to detect 3454 * circular references. 
3455 */ 3456 private void addAttrTypeToSchemaFile(Schema schema, String schemaFile, 3457 AttributeType attributeType, 3458 LinkedHashSet<AttributeValue> values, 3459 HashSet<AttributeType> addedTypes, 3460 int depth) 3461 throws DirectoryException 3462 { 3463 if (depth > 20) 3464 { 3465 Message message = ERR_SCHEMA_MODIFY_CIRCULAR_REFERENCE_AT.get( 3466 attributeType.getNameOrOID()); 3467 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3468 } 3469 3470 if (addedTypes.contains(attributeType)) 3471 { 3472 return; 3473 } 3474 3475 AttributeType superiorType = attributeType.getSuperiorType(); 3476 if ((superiorType != null) && 3477 schemaFile.equals(superiorType.getSchemaFile()) && 3478 (! addedTypes.contains(superiorType))) 3479 { 3480 addAttrTypeToSchemaFile(schema, schemaFile, superiorType, values, 3481 addedTypes, depth+1); 3482 } 3483 3484 values.add(new AttributeValue(attributeTypesType, 3485 attributeType.getDefinition())); 3486 addedTypes.add(attributeType); 3487 } 3488 3489 3490 3491 /** 3492 * Adds the definition for the specified objectclass to the provided set of 3493 * attribute values, recursively adding superior classes as appropriate. 3494 * 3495 * @param schema The schema containing the objectclass. 3496 * @param schemaFile The schema file with which the objectclass is 3497 * associated. 3498 * @param objectClass The objectclass whose definition should be added to 3499 * the value set. 3500 * @param values The set of values for objectclass definitions 3501 * already added. 3502 * @param addedClasses The set of objectclasses whose definitions have 3503 * already been added to the set of values. 3504 * @param depth A depth counter to use in an attempt to detect 3505 * circular references. 3506 */ 3507 private void addObjectClassToSchemaFile(Schema schema, String schemaFile, 3508 ObjectClass objectClass, 3509 LinkedHashSet<AttributeValue> values, 3510 HashSet<ObjectClass> addedClasses, 3511 int depth) 3512 throws DirectoryException 3513 { 3514 if (depth > 20) 3515 { 3516 Message message = ERR_SCHEMA_MODIFY_CIRCULAR_REFERENCE_OC.get( 3517 objectClass.getNameOrOID()); 3518 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3519 } 3520 3521 if (addedClasses.contains(objectClass)) 3522 { 3523 return; 3524 } 3525 3526 ObjectClass superiorClass = objectClass.getSuperiorClass(); 3527 if ((superiorClass != null) && 3528 schemaFile.equals(superiorClass.getSchemaFile()) && 3529 (! addedClasses.contains(superiorClass))) 3530 { 3531 addObjectClassToSchemaFile(schema, schemaFile, superiorClass, values, 3532 addedClasses, depth+1); 3533 } 3534 3535 values.add(new AttributeValue(objectClassesType, 3536 objectClass.getDefinition())); 3537 addedClasses.add(objectClass); 3538 } 3539 3540 3541 3542 /** 3543 * Adds the definition for the specified DIT structure rule to the provided 3544 * set of attribute values, recursively adding superior rules as appropriate. 3545 * 3546 * @param schema The schema containing the DIT structure rule. 3547 * @param schemaFile The schema file with which the DIT structure rule 3548 * is associated. 3549 * @param ditStructureRule The DIT structure rule whose definition should be 3550 * added to the value set. 3551 * @param values The set of values for DIT structure rule 3552 * definitions already added. 3553 * @param addedDSRs The set of DIT structure rules whose definitions 3554 * have already been added added to the set of 3555 * values. 
3556 * @param depth A depth counter to use in an attempt to detect 3557 * circular references. 3558 */ 3559 private void addDITStructureRuleToSchemaFile(Schema schema, String schemaFile, 3560 DITStructureRule ditStructureRule, 3561 LinkedHashSet<AttributeValue> values, 3562 HashSet<DITStructureRule> addedDSRs, int depth) 3563 throws DirectoryException 3564 { 3565 if (depth > 20) 3566 { 3567 Message message = ERR_SCHEMA_MODIFY_CIRCULAR_REFERENCE_DSR.get( 3568 ditStructureRule.getNameOrRuleID()); 3569 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3570 } 3571 3572 if (addedDSRs.contains(ditStructureRule)) 3573 { 3574 return; 3575 } 3576 3577 for (DITStructureRule dsr : ditStructureRule.getSuperiorRules()) 3578 { 3579 if (schemaFile.equals(dsr.getSchemaFile()) && (! addedDSRs.contains(dsr))) 3580 { 3581 addDITStructureRuleToSchemaFile(schema, schemaFile, dsr, values, 3582 addedDSRs, depth+1); 3583 } 3584 } 3585 3586 values.add(new AttributeValue(ditStructureRulesType, 3587 ditStructureRule.getDefinition())); 3588 addedDSRs.add(ditStructureRule); 3589 } 3590 3591 3592 3593 /** 3594 * Moves the specified temporary schema files in place of the active versions. 3595 * If an error occurs in the process, then this method will attempt to restore 3596 * the original schema files if possible. 3597 * 3598 * @param tempSchemaFiles The set of temporary schema files to be activated. 3599 * 3600 * @throws DirectoryException If a problem occurs while attempting to 3601 * install the temporary schema files. 3602 */ 3603 private void installSchemaFiles(HashMap<String,File> tempSchemaFiles) 3604 throws DirectoryException 3605 { 3606 // Create lists that will hold the three types of files we'll be dealing 3607 // with (the temporary files that will be installed, the installed schema 3608 // files, and the previously-installed schema files). 3609 ArrayList<File> installedFileList = new ArrayList<File>(); 3610 ArrayList<File> tempFileList = new ArrayList<File>(); 3611 ArrayList<File> origFileList = new ArrayList<File>(); 3612 3613 File schemaDir = new File(SchemaConfigManager.getSchemaDirectoryPath()); 3614 3615 for (String name : tempSchemaFiles.keySet()) 3616 { 3617 installedFileList.add(new File(schemaDir, name)); 3618 tempFileList.add(tempSchemaFiles.get(name)); 3619 origFileList.add(new File(schemaDir, name + ".orig")); 3620 } 3621 3622 3623 // If there are any old ".orig" files laying around from a previous 3624 // attempt, then try to clean them up. 3625 for (File f : origFileList) 3626 { 3627 if (f.exists()) 3628 { 3629 f.delete(); 3630 } 3631 } 3632 3633 3634 // Copy all of the currently-installed files with a ".orig" extension. If 3635 // this fails, then try to clean up the copies. 3636 try 3637 { 3638 for (int i=0; i < installedFileList.size(); i++) 3639 { 3640 File installedFile = installedFileList.get(i); 3641 File origFile = origFileList.get(i); 3642 3643 if (installedFile.exists()) 3644 { 3645 copyFile(installedFile, origFile); 3646 } 3647 } 3648 } 3649 catch (Exception e) 3650 { 3651 if (debugEnabled()) 3652 { 3653 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3654 } 3655 3656 boolean allCleaned = true; 3657 for (File f : origFileList) 3658 { 3659 try 3660 { 3661 if (f.exists()) 3662 { 3663 if (! 
f.delete()) 3664 { 3665 allCleaned = false; 3666 } 3667 } 3668 } 3669 catch (Exception e2) 3670 { 3671 if (debugEnabled()) 3672 { 3673 TRACER.debugCaught(DebugLogLevel.ERROR, e2); 3674 } 3675 3676 allCleaned = false; 3677 } 3678 } 3679 3680 if (allCleaned) 3681 { 3682 Message message = ERR_SCHEMA_MODIFY_CANNOT_WRITE_ORIG_FILES_CLEANED.get( 3683 getExceptionMessage(e)); 3684 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 3685 message, e); 3686 } 3687 else 3688 { 3689 3690 Message message = ERR_SCHEMA_MODIFY_CANNOT_WRITE_ORIG_FILES_NOT_CLEANED 3691 .get(getExceptionMessage(e)); 3692 3693 DirectoryServer.sendAlertNotification(this, 3694 ALERT_TYPE_CANNOT_COPY_SCHEMA_FILES, 3695 message); 3696 3697 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 3698 message, e); 3699 } 3700 } 3701 3702 3703 // Try to copy all of the temporary files into place over the installed 3704 // files. If this fails, then try to restore the originals. 3705 try 3706 { 3707 for (int i=0; i < installedFileList.size(); i++) 3708 { 3709 File installedFile = installedFileList.get(i); 3710 File tempFile = tempFileList.get(i); 3711 copyFile(tempFile, installedFile); 3712 } 3713 } 3714 catch (Exception e) 3715 { 3716 if (debugEnabled()) 3717 { 3718 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3719 } 3720 3721 for (File f : installedFileList) 3722 { 3723 try 3724 { 3725 if (f.exists()) 3726 { 3727 f.delete(); 3728 } 3729 } 3730 catch (Exception e2) 3731 { 3732 if (debugEnabled()) 3733 { 3734 TRACER.debugCaught(DebugLogLevel.ERROR, e2); 3735 } 3736 } 3737 } 3738 3739 boolean allRestored = true; 3740 for (int i=0; i < installedFileList.size(); i++) 3741 { 3742 File installedFile = installedFileList.get(i); 3743 File origFile = origFileList.get(i); 3744 3745 try 3746 { 3747 if (origFile.exists()) 3748 { 3749 if (! origFile.renameTo(installedFile)) 3750 { 3751 allRestored = false; 3752 } 3753 } 3754 } 3755 catch (Exception e2) 3756 { 3757 if (debugEnabled()) 3758 { 3759 TRACER.debugCaught(DebugLogLevel.ERROR, e2); 3760 } 3761 3762 allRestored = false; 3763 } 3764 } 3765 3766 if (allRestored) 3767 { 3768 Message message = ERR_SCHEMA_MODIFY_CANNOT_WRITE_NEW_FILES_RESTORED.get( 3769 getExceptionMessage(e)); 3770 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 3771 message, e); 3772 } 3773 else 3774 { 3775 Message message = ERR_SCHEMA_MODIFY_CANNOT_WRITE_NEW_FILES_NOT_RESTORED 3776 .get(getExceptionMessage(e)); 3777 3778 DirectoryServer.sendAlertNotification(this, 3779 ALERT_TYPE_CANNOT_WRITE_NEW_SCHEMA_FILES, 3780 message); 3781 3782 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 3783 message, e); 3784 } 3785 } 3786 3787 3788 // At this point, we're committed to the schema change, so we can't throw 3789 // any more exceptions, but all we have left is to clean up the original and 3790 // temporary files. 3791 for (File f : origFileList) 3792 { 3793 try 3794 { 3795 if (f.exists()) 3796 { 3797 f.delete(); 3798 } 3799 } 3800 catch (Exception e) 3801 { 3802 if (debugEnabled()) 3803 { 3804 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3805 } 3806 } 3807 } 3808 3809 for (File f : tempFileList) 3810 { 3811 try 3812 { 3813 if (f.exists()) 3814 { 3815 f.delete(); 3816 } 3817 } 3818 catch (Exception e) 3819 { 3820 if (debugEnabled()) 3821 { 3822 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3823 } 3824 } 3825 } 3826 } 3827 3828 3829 3830 /** 3831 * Creates a copy of the specified file. 3832 * 3833 * @param from The source file to be copied. 
3834 * @param to The destination file to be created. 3835 * 3836 * @throws IOException If a problem occurs. 3837 */ 3838 private void copyFile(File from, File to) 3839 throws IOException 3840 { 3841 byte[] buffer = new byte[4096]; 3842 FileInputStream inputStream = null; 3843 FileOutputStream outputStream = null; 3844 try 3845 { 3846 inputStream = new FileInputStream(from); 3847 outputStream = new FileOutputStream(to, false); 3848 3849 int bytesRead = inputStream.read(buffer); 3850 while (bytesRead > 0) 3851 { 3852 outputStream.write(buffer, 0, bytesRead); 3853 bytesRead = inputStream.read(buffer); 3854 } 3855 } 3856 finally 3857 { 3858 if (inputStream != null) 3859 { 3860 try 3861 { 3862 inputStream.close(); 3863 } 3864 catch (Exception e) 3865 { 3866 if (debugEnabled()) 3867 { 3868 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3869 } 3870 } 3871 } 3872 3873 if (outputStream != null) 3874 { 3875 outputStream.close(); 3876 } 3877 } 3878 } 3879 3880 3881 3882 /** 3883 * Performs any necessary cleanup in an attempt to delete any temporary schema 3884 * files that may have been left over after trying to install the new schema. 3885 * 3886 * @param tempSchemaFiles The set of temporary schema files that have been 3887 * created and are candidates for cleanup. 3888 */ 3889 private void cleanUpTempSchemaFiles(HashMap<String,File> tempSchemaFiles) 3890 { 3891 if ((tempSchemaFiles == null) || tempSchemaFiles.isEmpty()) 3892 { 3893 return; 3894 } 3895 3896 for (File f : tempSchemaFiles.values()) 3897 { 3898 try 3899 { 3900 if (f.exists()) 3901 { 3902 f.delete(); 3903 } 3904 } 3905 catch (Exception e) 3906 { 3907 if (debugEnabled()) 3908 { 3909 TRACER.debugCaught(DebugLogLevel.ERROR, e); 3910 } 3911 } 3912 } 3913 } 3914 3915 3916 3917 /** 3918 * {@inheritDoc} 3919 */ 3920 @Override() 3921 public void renameEntry(DN currentDN, Entry entry, 3922 ModifyDNOperation modifyDNOperation) 3923 throws DirectoryException 3924 { 3925 Message message = 3926 ERR_SCHEMA_MODIFY_DN_NOT_SUPPORTED.get(String.valueOf(currentDN)); 3927 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 3928 } 3929 3930 3931 3932 /** 3933 * {@inheritDoc} 3934 */ 3935 @Override() 3936 public void search(SearchOperation searchOperation) 3937 throws DirectoryException 3938 { 3939 DN baseDN = searchOperation.getBaseDN(); 3940 3941 boolean found = false; 3942 DN[] dnArray = baseDNs; 3943 DN matchedDN = null; 3944 for (DN dn : dnArray) 3945 { 3946 if (dn.equals(baseDN)) 3947 { 3948 found = true; 3949 break; 3950 } 3951 else if (dn.isAncestorOf(baseDN)) 3952 { 3953 matchedDN = dn; 3954 break; 3955 } 3956 } 3957 3958 if (! found) 3959 { 3960 Message message = ERR_SCHEMA_INVALID_BASE.get(String.valueOf(baseDN)); 3961 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message, 3962 matchedDN, null); 3963 } 3964 3965 3966 // If it's a onelevel or subordinate subtree search, then we will never 3967 // match anything since there isn't anything below the schema. 3968 SearchScope scope = searchOperation.getScope(); 3969 if ((scope == SearchScope.SINGLE_LEVEL) || 3970 (scope == SearchScope.SUBORDINATE_SUBTREE)) 3971 { 3972 return; 3973 } 3974 3975 3976 // Get the schema entry and see if it matches the filter. If so, then send 3977 // it to the client. 
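// Only the schema entry itself can match at this point (base-object or whole-subtree scope), so this amounts to evaluating the search filter against that single entry.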
3978 Entry schemaEntry = getSchemaEntry(baseDN, false); 3979 SearchFilter filter = searchOperation.getFilter(); 3980 if (filter.matchesEntry(schemaEntry)) 3981 { 3982 searchOperation.returnEntry(schemaEntry, null); 3983 } 3984 } 3985 3986 3987 3988 /** 3989 * {@inheritDoc} 3990 */ 3991 @Override() 3992 public HashSet<String> getSupportedControls() 3993 { 3994 return supportedControls; 3995 } 3996 3997 3998 3999 /** 4000 * {@inheritDoc} 4001 */ 4002 @Override() 4003 public HashSet<String> getSupportedFeatures() 4004 { 4005 return supportedFeatures; 4006 } 4007 4008 4009 4010 /** 4011 * {@inheritDoc} 4012 */ 4013 @Override() 4014 public boolean supportsLDIFExport() 4015 { 4016 // We will only export the DSE entry itself. 4017 return true; 4018 } 4019 4020 4021 4022 /** 4023 * {@inheritDoc} 4024 */ 4025 @Override() 4026 public void exportLDIF(LDIFExportConfig exportConfig) 4027 throws DirectoryException 4028 { 4029 // Create the LDIF writer. 4030 LDIFWriter ldifWriter; 4031 try 4032 { 4033 ldifWriter = new LDIFWriter(exportConfig); 4034 } 4035 catch (Exception e) 4036 { 4037 if (debugEnabled()) 4038 { 4039 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4040 } 4041 4042 Message message = ERR_SCHEMA_UNABLE_TO_CREATE_LDIF_WRITER.get( 4043 stackTraceToSingleLineString(e)); 4044 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4045 message); 4046 } 4047 4048 4049 // Write the root schema entry to it. Make sure to close the LDIF 4050 // writer when we're done. 4051 try 4052 { 4053 ldifWriter.writeEntry(getSchemaEntry(baseDNs[0], true)); 4054 } 4055 catch (Exception e) 4056 { 4057 if (debugEnabled()) 4058 { 4059 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4060 } 4061 4062 Message message = 4063 ERR_SCHEMA_UNABLE_TO_EXPORT_BASE.get(stackTraceToSingleLineString(e)); 4064 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4065 message); 4066 } 4067 finally 4068 { 4069 try 4070 { 4071 ldifWriter.close(); 4072 } 4073 catch (Exception e) 4074 { 4075 if (debugEnabled()) 4076 { 4077 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4078 } 4079 } 4080 } 4081 } 4082 4083 4084 4085 /** 4086 * {@inheritDoc} 4087 */ 4088 @Override() 4089 public boolean supportsLDIFImport() 4090 { 4091 return true; 4092 } 4093 4094 4095 4096 /** 4097 * {@inheritDoc} 4098 */ 4099 @Override() 4100 public LDIFImportResult importLDIF(LDIFImportConfig importConfig) 4101 throws DirectoryException 4102 { 4103 LDIFReader reader; 4104 try 4105 { 4106 reader = new LDIFReader(importConfig); 4107 } 4108 catch (Exception e) 4109 { 4110 Message message = 4111 ERR_MEMORYBACKEND_CANNOT_CREATE_LDIF_READER.get(String.valueOf(e)); 4112 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4113 message, e); 4114 } 4115 4116 4117 try 4118 { 4119 while (true) 4120 { 4121 Entry e = null; 4122 try 4123 { 4124 e = reader.readEntry(); 4125 if (e == null) 4126 { 4127 break; 4128 } 4129 } 4130 catch (LDIFException le) 4131 { 4132 if (! 
le.canContinueReading()) 4133 { 4134 Message message = 4135 ERR_MEMORYBACKEND_ERROR_READING_LDIF.get(String.valueOf(le)); 4136 throw new DirectoryException( 4137 DirectoryServer.getServerErrorResultCode(), 4138 message, le); 4139 } 4140 else 4141 { 4142 continue; 4143 } 4144 } 4145 4146 importEntry(e); 4147 } 4148 4149 return new LDIFImportResult(reader.getEntriesRead(), 4150 reader.getEntriesRejected(), 4151 reader.getEntriesIgnored()); 4152 } 4153 catch (DirectoryException de) 4154 { 4155 throw de; 4156 } 4157 catch (Exception e) 4158 { 4159 Message message = 4160 ERR_MEMORYBACKEND_ERROR_DURING_IMPORT.get(String.valueOf(e)); 4161 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4162 message, e); 4163 } 4164 finally 4165 { 4166 reader.close(); 4167 } 4168 } 4169 4170 4171 /** 4172 * Imports an entry into a new schema by: 4173 * - duplicating the current schema 4174 * - iterating over each element of the newSchemaEntry and comparing it 4175 * with the existing schema 4176 * - if the new schema element does not exist: add it 4177 * - if it already exists with a different definition: overwrite it. 4178 * 4179 * FIXME : attributeTypes and objectClasses are the only elements 4180 * currently taken into account. 4181 * 4182 * @param newSchemaEntry The entry to be imported. 4183 */ 4184 private void importEntry(Entry newSchemaEntry) 4185 throws DirectoryException 4186 { 4187 Schema schema = DirectoryServer.getSchema(); 4188 Schema newSchema = DirectoryServer.getSchema().duplicate(); 4189 TreeSet<String> modifiedSchemaFiles = new TreeSet<String>(); 4190 4191 // Get the attributeTypes attribute from the entry. 4192 AttributeTypeSyntax attrTypeSyntax; 4193 try 4194 { 4195 attrTypeSyntax = (AttributeTypeSyntax) 4196 schema.getSyntax(SYNTAX_ATTRIBUTE_TYPE_OID); 4197 if (attrTypeSyntax == null) 4198 { 4199 attrTypeSyntax = new AttributeTypeSyntax(); 4200 attrTypeSyntax.initializeSyntax(null); 4201 } 4202 } 4203 catch (Exception e) 4204 { 4205 if (debugEnabled()) 4206 { 4207 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4208 } 4209 4210 attrTypeSyntax = new AttributeTypeSyntax(); 4211 } 4212 4213 AttributeType attributeAttrType = 4214 schema.getAttributeType(ATTR_ATTRIBUTE_TYPES_LC); 4215 if (attributeAttrType == null) 4216 { 4217 attributeAttrType = 4218 DirectoryServer.getDefaultAttributeType(ATTR_ATTRIBUTE_TYPES, 4219 attrTypeSyntax); 4220 } 4221 4222 // loop on the attribute types in the entry just received 4223 // and add them to the new schema. 4224 List<Attribute> attrList = newSchemaEntry.getAttribute(attributeAttrType); 4225 Set<String> oidList = new HashSet<String>(1000); 4226 if ((attrList != null) && (! attrList.isEmpty())) 4227 { 4228 for (Attribute a : attrList) 4229 { 4230 // Look for attribute types that could have been added to the schema 4231 // or modified in the schema. 4232 for (AttributeValue v : a.getValues()) 4233 { 4234 // Parse the attribute type. 4235 AttributeType attrType = AttributeTypeSyntax.decodeAttributeType( 4236 v.getValue(), schema, false); 4237 String schemaFile = attrType.getSchemaFile(); 4238 if (schemaFile.equals(CONFIG_SCHEMA_ELEMENTS_FILE)) 4239 { 4240 // Don't import the file containing the definitions of the 4241 // Schema elements used for configuration because these 4242 // definitions may vary between versions of OpenDS. 4243 continue; 4244 } 4245 4246 oidList.add(attrType.getOID()); 4247 try 4248 { 4249 // Register this attribute type in the new schema 4250 // unless an identical definition is already present.
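// Note that the comparison below is on the string form of the two definitions, so any textual difference causes the imported definition to be registered in place of the existing one.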
4251 AttributeType oldAttrType = 4252 schema.getAttributeType(attrType.getOID()); 4253 if ((oldAttrType == null) || 4254 (!oldAttrType.toString().equals(attrType.toString()))) 4255 { 4256 newSchema.registerAttributeType(attrType, true); 4257 4258 if (schemaFile != null) 4259 { 4260 modifiedSchemaFiles.add(schemaFile); 4261 } 4262 } 4263 } 4264 catch (DirectoryException de) 4265 { 4266 Message message = 4267 NOTE_SCHEMA_IMPORT_FAILED.get( 4268 attrType.toString(), de.getMessage()); 4269 logError(message); 4270 } 4271 catch (Exception e) 4272 { 4273 Message message = 4274 NOTE_SCHEMA_IMPORT_FAILED.get( 4275 attrType.toString(), e.getMessage()); 4276 logError(message); 4277 } 4278 } 4279 } 4280 } 4281 4282 // loop on all the attribute types in the current schema and delete 4283 // them from the new schema if they are not in the imported schema entry. 4284 ConcurrentHashMap<String, AttributeType> currentAttrTypes = 4285 newSchema.getAttributeTypes(); 4286 4287 for (AttributeType removeType : currentAttrTypes.values()) 4288 { 4289 String schemaFile = removeType.getSchemaFile(); 4290 if (schemaFile.equals(CONFIG_SCHEMA_ELEMENTS_FILE)) 4291 { 4292 // Don't import the file containing the definitiong of the 4293 // Schema elements used for configuration because these 4294 // definitions may vary between versions of OpenDS. 4295 continue; 4296 } 4297 if (!oidList.contains(removeType.getOID())) 4298 { 4299 newSchema.deregisterAttributeType(removeType); 4300 4301 if (schemaFile != null) 4302 { 4303 modifiedSchemaFiles.add(schemaFile); 4304 } 4305 } 4306 } 4307 4308 // loop on the objectClasses from the entry, search if they are 4309 // already in the current schema, add them if not. 4310 ObjectClassSyntax ocSyntax; 4311 try 4312 { 4313 ocSyntax = (ObjectClassSyntax) schema.getSyntax(SYNTAX_OBJECTCLASS_OID); 4314 if (ocSyntax == null) 4315 { 4316 ocSyntax = new ObjectClassSyntax(); 4317 ocSyntax.initializeSyntax(null); 4318 } 4319 } 4320 catch (Exception e) 4321 { 4322 if (debugEnabled()) 4323 { 4324 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4325 } 4326 4327 ocSyntax = new ObjectClassSyntax(); 4328 } 4329 4330 AttributeType objectclassAttrType = 4331 schema.getAttributeType(ATTR_OBJECTCLASSES_LC); 4332 if (objectclassAttrType == null) 4333 { 4334 objectclassAttrType = 4335 DirectoryServer.getDefaultAttributeType(ATTR_OBJECTCLASSES, 4336 ocSyntax); 4337 } 4338 4339 oidList.clear(); 4340 List<Attribute> ocList = newSchemaEntry.getAttribute(objectclassAttrType); 4341 if ((ocList != null) && (! ocList.isEmpty())) 4342 { 4343 for (Attribute a : ocList) 4344 { 4345 for (AttributeValue v : a.getValues()) 4346 { 4347 // It IS important here to allow the unknown elements that could 4348 // appear in the new config schema. 4349 ObjectClass newObjectClass = ObjectClassSyntax.decodeObjectClass( 4350 v.getValue(), newSchema, true); 4351 String schemaFile = newObjectClass.getSchemaFile(); 4352 if (schemaFile.equals(CONFIG_SCHEMA_ELEMENTS_FILE)) 4353 { 4354 // Don't import the file containing the definitions of the 4355 // Schema elements used for configuration because these 4356 // definitions may vary between versions of OpenDS. 4357 continue; 4358 } 4359 4360 // Now we know we are not in the config schema, let's check 4361 // the unknown elements ... sadly but simply by redoing the 4362 // whole decoding. 
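// The first decode above was deliberately lenient about unknown elements; decoding again in strict mode ensures that references to elements genuinely undefined in the new schema are rejected.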
4363 newObjectClass = ObjectClassSyntax.decodeObjectClass( 4364 v.getValue(), newSchema, false); 4365 oidList.add(newObjectClass.getOID()); 4366 try 4367 { 4368 // Register this ObjectClass in the new schema 4369 // unless an identical definition is already present. 4370 ObjectClass oldObjectClass = 4371 schema.getObjectClass(newObjectClass.getOID()); 4372 if ((oldObjectClass == null) || 4373 (!oldObjectClass.toString().equals(newObjectClass.toString()))) 4374 { 4375 newSchema.registerObjectClass(newObjectClass, true); 4376 4377 if (schemaFile != null) 4378 { 4379 modifiedSchemaFiles.add(schemaFile); 4380 } 4381 } 4382 } 4383 catch (DirectoryException de) 4384 { 4385 Message message = 4386 NOTE_SCHEMA_IMPORT_FAILED.get( 4387 newObjectClass.toString(), de.getMessage()); 4388 logError(message); 4389 } 4390 catch (Exception e) 4391 { 4392 Message message = 4393 NOTE_SCHEMA_IMPORT_FAILED.get( 4394 newObjectClass.toString(), e.getMessage()); 4395 logError(message); 4396 } 4397 } 4398 } 4399 } 4400 4401 // loop on all the object classes in the current schema and delete 4402 // them from the new schema if they are not in the imported schema entry. 4403 ConcurrentHashMap<String, ObjectClass> currentObjectClasses = 4404 newSchema.getObjectClasses(); 4405 4406 for (ObjectClass removeClass : currentObjectClasses.values()) 4407 { 4408 String schemaFile = removeClass.getSchemaFile(); 4409 if (schemaFile.equals(CONFIG_SCHEMA_ELEMENTS_FILE)) 4410 { 4411 // Don't import the file containing the definitions of the 4412 // Schema elements used for configuration because these 4413 // definitions may vary between versions of OpenDS. 4414 continue; 4415 } 4416 if (!oidList.contains(removeClass.getOID())) 4417 { 4418 newSchema.deregisterObjectClass(removeClass); 4419 4420 if (schemaFile != null) 4421 { 4422 modifiedSchemaFiles.add(schemaFile); 4423 } 4424 } 4425 } 4426 4427 // Finally, if there were some modifications, save the new schema 4428 // in the schema files and update DirectoryServer. 4429 if (!modifiedSchemaFiles.isEmpty()) 4430 { 4431 updateSchemaFiles(newSchema, modifiedSchemaFiles); 4432 DirectoryServer.setSchema(newSchema); 4433 } 4434 } 4435 4436 4437 4438 /** 4439 * {@inheritDoc} 4440 */ 4441 @Override() 4442 public boolean supportsBackup() 4443 { 4444 // We do support an online backup mechanism for the schema. 4445 return true; 4446 } 4447 4448 4449 4450 /** 4451 * {@inheritDoc} 4452 */ 4453 @Override() 4454 public boolean supportsBackup(BackupConfig backupConfig, 4455 StringBuilder unsupportedReason) 4456 { 4457 // We should support online backup for the schema in any form. This 4458 // implementation does not support incremental backups, but if we're 4459 // asked to perform an incremental backup we'll simply do a full backup 4460 // instead, so the answer to this should always be "true". 4461 return true; 4462 } 4463 4464 4465 4466 /** 4467 * {@inheritDoc} 4468 */ 4469 @Override() 4470 public void createBackup(BackupConfig backupConfig) 4471 throws DirectoryException 4472 { 4473 // Get the properties to use for the backup. We don't care whether or not 4474 // it's incremental, so there's no need to get that.
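// When hashing is requested, the archive is either digested with the server's preferred message digest or, if the hash is to be signed, protected with a MAC obtained from the crypto manager.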
4475 String backupID = backupConfig.getBackupID(); 4476 BackupDirectory backupDirectory = backupConfig.getBackupDirectory(); 4477 boolean compress = backupConfig.compressData(); 4478 boolean encrypt = backupConfig.encryptData(); 4479 boolean hash = backupConfig.hashData(); 4480 boolean signHash = backupConfig.signHash(); 4481 4482 4483 // Create a hash map that will hold the extra backup property information 4484 // for this backup. 4485 HashMap<String,String> backupProperties = new HashMap<String,String>(); 4486 4487 4488 // Get the crypto manager and use it to obtain references to the message 4489 // digest and/or MAC to use for hashing and/or signing. 4490 CryptoManager cryptoManager = DirectoryServer.getCryptoManager(); 4491 Mac mac = null; 4492 MessageDigest digest = null; 4493 String digestAlgorithm = null; 4494 String macKeyID = null; 4495 4496 if (hash) 4497 { 4498 if (signHash) 4499 { 4500 try 4501 { 4502 macKeyID = cryptoManager.getMacEngineKeyEntryID(); 4503 backupProperties.put(BACKUP_PROPERTY_MAC_KEY_ID, macKeyID); 4504 4505 mac = cryptoManager.getMacEngine(macKeyID); 4506 } 4507 catch (Exception e) 4508 { 4509 if (debugEnabled()) 4510 { 4511 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4512 } 4513 4514 Message message = ERR_SCHEMA_BACKUP_CANNOT_GET_MAC.get( 4515 macKeyID, stackTraceToSingleLineString(e)); 4516 throw new DirectoryException( 4517 DirectoryServer.getServerErrorResultCode(), message, 4518 e); 4519 } 4520 } 4521 else 4522 { 4523 digestAlgorithm = cryptoManager.getPreferredMessageDigestAlgorithm(); 4524 backupProperties.put(BACKUP_PROPERTY_DIGEST_ALGORITHM, digestAlgorithm); 4525 4526 try 4527 { 4528 digest = cryptoManager.getPreferredMessageDigest(); 4529 } 4530 catch (Exception e) 4531 { 4532 if (debugEnabled()) 4533 { 4534 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4535 } 4536 4537 Message message = ERR_SCHEMA_BACKUP_CANNOT_GET_DIGEST.get( 4538 digestAlgorithm, stackTraceToSingleLineString(e)); 4539 throw new DirectoryException( 4540 DirectoryServer.getServerErrorResultCode(), message, 4541 e); 4542 } 4543 } 4544 } 4545 4546 4547 // Create an output stream that will be used to write the archive file. At 4548 // its core, it will be a file output stream to put a file on the disk. If 4549 // we are to encrypt the data, then that file output stream will be wrapped 4550 // in a cipher output stream. The resulting output stream will then be 4551 // wrapped by a zip output stream (which may or may not actually use 4552 // compression). 4553 String filename = null; 4554 OutputStream outputStream; 4555 try 4556 { 4557 filename = SCHEMA_BACKUP_BASE_FILENAME + backupID; 4558 File archiveFile = new File(backupDirectory.getPath() + File.separator + 4559 filename); 4560 if (archiveFile.exists()) 4561 { 4562 int i=1; 4563 while (true) 4564 { 4565 archiveFile = new File(backupDirectory.getPath() + File.separator + 4566 filename + "." + i); 4567 if (archiveFile.exists()) 4568 { 4569 i++; 4570 } 4571 else 4572 { 4573 filename = filename + "." + i; 4574 break; 4575 } 4576 } 4577 } 4578 4579 outputStream = new FileOutputStream(archiveFile, false); 4580 backupProperties.put(BACKUP_PROPERTY_ARCHIVE_FILENAME, filename); 4581 } 4582 catch (Exception e) 4583 { 4584 if (debugEnabled()) 4585 { 4586 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4587 } 4588 4589 Message message = ERR_SCHEMA_BACKUP_CANNOT_CREATE_ARCHIVE_FILE. 
4590 get(String.valueOf(filename), backupDirectory.getPath(), 4591 getExceptionMessage(e)); 4592 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4593 message, e); 4594 } 4595 4596 4597 // If we should encrypt the data, then wrap the output stream in a cipher 4598 // output stream. 4599 if (encrypt) 4600 { 4601 try 4602 { 4603 outputStream 4604 = cryptoManager.getCipherOutputStream(outputStream); 4605 } 4606 catch (CryptoManagerException e) 4607 { 4608 if (debugEnabled()) 4609 { 4610 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4611 } 4612 4613 Message message = ERR_SCHEMA_BACKUP_CANNOT_GET_CIPHER.get( 4614 stackTraceToSingleLineString(e)); 4615 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4616 message, e); 4617 } 4618 } 4619 4620 4621 // Wrap the file output stream in a zip output stream. 4622 ZipOutputStream zipStream = new ZipOutputStream(outputStream); 4623 4624 Message message = ERR_SCHEMA_BACKUP_ZIP_COMMENT.get( 4625 DynamicConstants.PRODUCT_NAME, 4626 backupID); 4627 zipStream.setComment(String.valueOf(message)); 4628 4629 if (compress) 4630 { 4631 zipStream.setLevel(Deflater.DEFAULT_COMPRESSION); 4632 } 4633 else 4634 { 4635 zipStream.setLevel(Deflater.NO_COMPRESSION); 4636 } 4637 4638 4639 // Get the path to the directory in which the schema files reside and 4640 // then get a list of all the files in that directory. 4641 String schemaDirPath = SchemaConfigManager.getSchemaDirectoryPath(); 4642 File[] schemaFiles; 4643 try 4644 { 4645 File schemaDir = new File(schemaDirPath); 4646 schemaFiles = schemaDir.listFiles(); 4647 } 4648 catch (Exception e) 4649 { 4650 if (debugEnabled()) 4651 { 4652 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4653 } 4654 4655 message = ERR_SCHEMA_BACKUP_CANNOT_LIST_SCHEMA_FILES.get( 4656 schemaDirPath, getExceptionMessage(e)); 4657 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4658 message, e); 4659 } 4660 4661 4662 // Iterate through the schema files and write them to the zip stream. If 4663 // we're using a hash or MAC, then calculate that as well. 4664 byte[] buffer = new byte[8192]; 4665 for (File schemaFile : schemaFiles) 4666 { 4667 if (backupConfig.isCancelled()) 4668 { 4669 break; 4670 } 4671 4672 if (! schemaFile.isFile()) 4673 { 4674 // If there are any non-file items in the directory (e.g., one or more 4675 // subdirectories), then we'll skip them. 4676 continue; 4677 } 4678 4679 String baseName = schemaFile.getName(); 4680 4681 4682 // We'll put the name in the hash, too. 
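// Feeding each file name into the digest/MAC as well as its contents means that tampering with entry names in the archive, not just their contents, will be detected at verification time.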
4683 if (hash) 4684 { 4685 if (signHash) 4686 { 4687 mac.update(getBytes(baseName)); 4688 } 4689 else 4690 { 4691 digest.update(getBytes(baseName)); 4692 } 4693 } 4694 4695 InputStream inputStream = null; 4696 try 4697 { 4698 ZipEntry zipEntry = new ZipEntry(baseName); 4699 zipStream.putNextEntry(zipEntry); 4700 4701 inputStream = new FileInputStream(schemaFile); 4702 while (true) 4703 { 4704 int bytesRead = inputStream.read(buffer); 4705 if (bytesRead < 0 || backupConfig.isCancelled()) 4706 { 4707 break; 4708 } 4709 4710 if (hash) 4711 { 4712 if (signHash) 4713 { 4714 mac.update(buffer, 0, bytesRead); 4715 } 4716 else 4717 { 4718 digest.update(buffer, 0, bytesRead); 4719 } 4720 } 4721 4722 zipStream.write(buffer, 0, bytesRead); 4723 } 4724 4725 zipStream.closeEntry(); 4726 inputStream.close(); 4727 } 4728 catch (Exception e) 4729 { 4730 if (debugEnabled()) 4731 { 4732 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4733 } 4734 4735 try 4736 { 4737 inputStream.close(); 4738 } catch (Exception e2) {} 4739 4740 try 4741 { 4742 zipStream.close(); 4743 } catch (Exception e2) {} 4744 4745 message = ERR_SCHEMA_BACKUP_CANNOT_BACKUP_SCHEMA_FILE.get( 4746 baseName, stackTraceToSingleLineString(e)); 4747 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4748 message, e); 4749 } 4750 } 4751 4752 4753 // We're done writing the file, so close the zip stream (which should also 4754 // close the underlying stream). 4755 try 4756 { 4757 zipStream.close(); 4758 } 4759 catch (Exception e) 4760 { 4761 if (debugEnabled()) 4762 { 4763 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4764 } 4765 4766 message = ERR_SCHEMA_BACKUP_CANNOT_CLOSE_ZIP_STREAM.get( 4767 filename, backupDirectory.getPath(), stackTraceToSingleLineString(e)); 4768 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4769 message, e); 4770 } 4771 4772 4773 // Get the digest or MAC bytes if appropriate. 4774 byte[] digestBytes = null; 4775 byte[] macBytes = null; 4776 if (hash) 4777 { 4778 if (signHash) 4779 { 4780 macBytes = mac.doFinal(); 4781 } 4782 else 4783 { 4784 digestBytes = digest.digest(); 4785 } 4786 } 4787 4788 4789 // Create the backup info structure for this backup and add it to the backup 4790 // directory. 4791 // FIXME -- Should I use the date from when I started or finished? 4792 BackupInfo backupInfo = new BackupInfo(backupDirectory, backupID, 4793 new Date(), false, compress, 4794 encrypt, digestBytes, macBytes, 4795 null, backupProperties); 4796 4797 try 4798 { 4799 backupDirectory.addBackup(backupInfo); 4800 backupDirectory.writeBackupDirectoryDescriptor(); 4801 } 4802 catch (Exception e) 4803 { 4804 if (debugEnabled()) 4805 { 4806 TRACER.debugCaught(DebugLogLevel.ERROR, e); 4807 } 4808 4809 message = ERR_SCHEMA_BACKUP_CANNOT_UPDATE_BACKUP_DESCRIPTOR.get( 4810 backupDirectory.getDescriptorPath(), stackTraceToSingleLineString(e)); 4811 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4812 message, e); 4813 } 4814 } 4815 4816 4817 4818 /** 4819 * {@inheritDoc} 4820 */ 4821 @Override() 4822 public void removeBackup(BackupDirectory backupDirectory, 4823 String backupID) 4824 throws DirectoryException 4825 { 4826 // This backend does not provide a backup/restore mechanism. 
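// (createBackup() and restoreBackup() are implemented elsewhere in this class; only the removal of individual backups is rejected here.)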
4827 Message message = ERR_SCHEMA_BACKUP_AND_RESTORE_NOT_SUPPORTED.get(); 4828 throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message); 4829 } 4830 4831 4832 4833 /** 4834 * {@inheritDoc} 4835 */ 4836 @Override() 4837 public boolean supportsRestore() 4838 { 4839 // We will provide a restore, but only for offline operations. 4840 return true; 4841 } 4842 4843 4844 4845 /** 4846 * {@inheritDoc} 4847 */ 4848 @Override() 4849 public void restoreBackup(RestoreConfig restoreConfig) 4850 throws DirectoryException 4851 { 4852 // First, make sure that the requested backup exists. 4853 BackupDirectory backupDirectory = restoreConfig.getBackupDirectory(); 4854 String backupPath = backupDirectory.getPath(); 4855 String backupID = restoreConfig.getBackupID(); 4856 BackupInfo backupInfo = backupDirectory.getBackupInfo(backupID); 4857 if (backupInfo == null) 4858 { 4859 Message message = 4860 ERR_SCHEMA_RESTORE_NO_SUCH_BACKUP.get(backupID, backupPath); 4861 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4862 message); 4863 } 4864 4865 4866 // Read the backup info structure to determine the name of the file that 4867 // contains the archive. Then make sure that file exists. 4868 String backupFilename = 4869 backupInfo.getBackupProperty(BACKUP_PROPERTY_ARCHIVE_FILENAME); 4870 if (backupFilename == null) 4871 { 4872 Message message = 4873 ERR_SCHEMA_RESTORE_NO_BACKUP_FILE.get(backupID, backupPath); 4874 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4875 message); 4876 } 4877 4878 File backupFile = new File(backupPath + File.separator + backupFilename); 4879 try 4880 { 4881 if (! backupFile.exists()) 4882 { 4883 Message message = 4884 ERR_SCHEMA_RESTORE_NO_SUCH_FILE.get(backupID, backupFile.getPath()); 4885 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4886 message); 4887 } 4888 } 4889 catch (DirectoryException de) 4890 { 4891 throw de; 4892 } 4893 catch (Exception e) 4894 { 4895 Message message = ERR_SCHEMA_RESTORE_CANNOT_CHECK_FOR_ARCHIVE.get( 4896 backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); 4897 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4898 message, e); 4899 } 4900 4901 4902 // If the backup is hashed, then we need to get the message digest to use 4903 // to verify it. 4904 byte[] unsignedHash = backupInfo.getUnsignedHash(); 4905 MessageDigest digest = null; 4906 if (unsignedHash != null) 4907 { 4908 String digestAlgorithm = 4909 backupInfo.getBackupProperty(BACKUP_PROPERTY_DIGEST_ALGORITHM); 4910 if (digestAlgorithm == null) 4911 { 4912 Message message = ERR_SCHEMA_RESTORE_UNKNOWN_DIGEST.get(backupID); 4913 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4914 message); 4915 } 4916 4917 try 4918 { 4919 digest = DirectoryServer.getCryptoManager().getMessageDigest( 4920 digestAlgorithm); 4921 } 4922 catch (Exception e) 4923 { 4924 Message message = 4925 ERR_SCHEMA_RESTORE_CANNOT_GET_DIGEST.get(backupID, digestAlgorithm); 4926 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4927 message, e); 4928 } 4929 } 4930 4931 4932 // If the backup is signed, then we need to get the MAC to use to verify it. 
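// The MAC key ID recorded in the backup properties at backup time is used to look up the same MAC engine for verification; if the property is missing or the key is unavailable, the restore fails.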
4933 byte[] signedHash = backupInfo.getSignedHash(); 4934 Mac mac = null; 4935 if (signedHash != null) 4936 { 4937 String macKeyID = 4938 backupInfo.getBackupProperty(BACKUP_PROPERTY_MAC_KEY_ID); 4939 if (macKeyID == null) 4940 { 4941 Message message = ERR_SCHEMA_RESTORE_UNKNOWN_MAC.get(backupID); 4942 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4943 message); 4944 } 4945 4946 try 4947 { 4948 mac = DirectoryServer.getCryptoManager().getMacEngine(macKeyID); 4949 } 4950 catch (Exception e) 4951 { 4952 Message message = ERR_SCHEMA_RESTORE_CANNOT_GET_MAC.get( 4953 backupID, macKeyID); 4954 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4955 message, e); 4956 } 4957 } 4958 4959 4960 // Create the input stream that will be used to read the backup file. At 4961 // its core, it will be a file input stream. 4962 InputStream inputStream; 4963 try 4964 { 4965 inputStream = new FileInputStream(backupFile); 4966 } 4967 catch (Exception e) 4968 { 4969 Message message = ERR_SCHEMA_RESTORE_CANNOT_OPEN_BACKUP_FILE.get( 4970 backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); 4971 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4972 message, e); 4973 } 4974 4975 // If the backup is encrypted, then we need to wrap the file input stream 4976 // in a cipher input stream. 4977 if (backupInfo.isEncrypted()) 4978 { 4979 try 4980 { 4981 inputStream = DirectoryServer.getCryptoManager() 4982 .getCipherInputStream(inputStream); 4983 } 4984 catch (CryptoManagerException e) 4985 { 4986 Message message = ERR_SCHEMA_RESTORE_CANNOT_GET_CIPHER.get( 4987 backupFile.getPath(), stackTraceToSingleLineString(e)); 4988 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 4989 message, e); 4990 } 4991 } 4992 4993 // Now wrap the resulting input stream in a zip stream so that we can read 4994 // its contents. We don't need to worry about whether to use compression or 4995 // not because it will be handled automatically. 4996 ZipInputStream zipStream = new ZipInputStream(inputStream); 4997 4998 4999 // Determine whether we should actually do the restore, or if we should just 5000 // try to verify the archive. If we are not going to verify only, then 5001 // move the current schema directory out of the way so we can keep it around 5002 // to restore if a problem occurs. 5003 String schemaDirPath = SchemaConfigManager.getSchemaDirectoryPath(); 5004 File schemaDir = new File(schemaDirPath); 5005 String backupDirPath = null; 5006 File schemaBackupDir = null; 5007 boolean verifyOnly = restoreConfig.verifyOnly(); 5008 if (! verifyOnly) 5009 { 5010 // Rename the current schema directory if it exists. 5011 try 5012 { 5013 if (schemaDir.exists()) 5014 { 5015 String schemaBackupDirPath = schemaDirPath + ".save"; 5016 backupDirPath = schemaBackupDirPath; 5017 schemaBackupDir = new File(backupDirPath); 5018 if (schemaBackupDir.exists()) 5019 { 5020 int i=2; 5021 while (true) 5022 { 5023 backupDirPath = schemaBackupDirPath + i; 5024 schemaBackupDir = new File(backupDirPath); 5025 if (schemaBackupDir.exists()) 5026 { 5027 i++; 5028 } 5029 else 5030 { 5031 break; 5032 } 5033 } 5034 } 5035 5036 schemaDir.renameTo(schemaBackupDir); 5037 } 5038 } 5039 catch (Exception e) 5040 { 5041 Message message = ERR_SCHEMA_RESTORE_CANNOT_RENAME_CURRENT_DIRECTORY. 
5042 get(backupID, schemaDirPath, String.valueOf(backupDirPath), 5043 stackTraceToSingleLineString(e)); 5044 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5045 message, e); 5046 } 5047 5048 5049 // Create a new directory to hold the restored schema files. 5050 try 5051 { 5052 schemaDir.mkdirs(); 5053 } 5054 catch (Exception e) 5055 { 5056 // Try to restore the previous schema directory if possible. This will 5057 // probably fail in this case, but try anyway. 5058 if (schemaBackupDir != null) 5059 { 5060 try 5061 { 5062 schemaBackupDir.renameTo(schemaDir); 5063 Message message = 5064 NOTE_SCHEMA_RESTORE_RESTORED_OLD_SCHEMA.get(schemaDirPath); 5065 logError(message); 5066 } 5067 catch (Exception e2) 5068 { 5069 Message message = ERR_SCHEMA_RESTORE_CANNOT_RESTORE_OLD_SCHEMA.get( 5070 schemaBackupDir.getPath()); 5071 logError(message); 5072 } 5073 } 5074 5075 5076 Message message = ERR_SCHEMA_RESTORE_CANNOT_CREATE_SCHEMA_DIRECTORY.get( 5077 backupID, schemaDirPath, stackTraceToSingleLineString(e)); 5078 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5079 message, e); 5080 } 5081 } 5082 5083 5084 // Read through the archive file an entry at a time. For each entry, update 5085 // the digest or MAC if necessary, and if we're actually doing the restore, 5086 // then write the files out into the schema directory. 5087 byte[] buffer = new byte[8192]; 5088 while (true) 5089 { 5090 ZipEntry zipEntry; 5091 try 5092 { 5093 zipEntry = zipStream.getNextEntry(); 5094 } 5095 catch (Exception e) 5096 { 5097 // Tell the user where the previous schema was archived. 5098 if (schemaBackupDir != null) 5099 { 5100 Message message = ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED.get( 5101 schemaBackupDir.getPath()); 5102 logError(message); 5103 } 5104 5105 Message message = ERR_SCHEMA_RESTORE_CANNOT_GET_ZIP_ENTRY.get( 5106 backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); 5107 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5108 message, e); 5109 } 5110 5111 if (zipEntry == null) 5112 { 5113 break; 5114 } 5115 5116 5117 // Get the filename for the zip entry and update the digest or MAC as 5118 // necessary. 5119 String fileName = zipEntry.getName(); 5120 if (digest != null) 5121 { 5122 digest.update(getBytes(fileName)); 5123 } 5124 if (mac != null) 5125 { 5126 mac.update(getBytes(fileName)); 5127 } 5128 5129 5130 // If we're doing the restore, then create the output stream to write the 5131 // file. 5132 OutputStream outputStream = null; 5133 if (! verifyOnly) 5134 { 5135 String filePath = schemaDirPath + File.separator + fileName; 5136 try 5137 { 5138 outputStream = new FileOutputStream(filePath); 5139 } 5140 catch (Exception e) 5141 { 5142 // Tell the user where the previous schema was archived. 5143 if (schemaBackupDir != null) 5144 { 5145 Message message = ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED.get( 5146 schemaBackupDir.getPath()); 5147 logError(message); 5148 } 5149 5150 Message message = ERR_SCHEMA_RESTORE_CANNOT_CREATE_FILE.get( 5151 backupID, filePath, stackTraceToSingleLineString(e)); 5152 throw new DirectoryException( 5153 DirectoryServer.getServerErrorResultCode(), message, 5154 e); 5155 } 5156 } 5157 5158 5159 // Read the contents of the file and update the digest or MAC as 5160 // necessary. If we're actually restoring it, then write it into the 5161 // new schema directory. 
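// Each zip entry is streamed through in chunks; in verify-only mode outputStream stays null, so the data only feeds the digest/MAC and nothing is written to disk.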
5162 try 5163 { 5164 while (true) 5165 { 5166 int bytesRead = zipStream.read(buffer); 5167 if (bytesRead < 0) 5168 { 5169 // We've reached the end of the entry. 5170 break; 5171 } 5172 5173 5174 // Update the digest or MAC if appropriate. 5175 if (digest != null) 5176 { 5177 digest.update(buffer, 0, bytesRead); 5178 } 5179 5180 if (mac != null) 5181 { 5182 mac.update(buffer, 0, bytesRead); 5183 } 5184 5185 5186 // Write the data to the output stream if appropriate. 5187 if (outputStream != null) 5188 { 5189 outputStream.write(buffer, 0, bytesRead); 5190 } 5191 } 5192 5193 5194 // We're at the end of the file so close the output stream if we're 5195 // writing it. 5196 if (outputStream != null) 5197 { 5198 outputStream.close(); 5199 } 5200 } 5201 catch (Exception e) 5202 { 5203 // Tell the user where the previous schema was archived. 5204 if (schemaBackupDir != null) 5205 { 5206 Message message = ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED.get( 5207 schemaBackupDir.getPath()); 5208 logError(message); 5209 } 5210 5211 Message message = ERR_SCHEMA_RESTORE_CANNOT_PROCESS_ARCHIVE_FILE.get( 5212 backupID, fileName, stackTraceToSingleLineString(e)); 5213 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5214 message, e); 5215 } 5216 } 5217 5218 5219 // Close the zip stream since we don't need it anymore. 5220 try 5221 { 5222 zipStream.close(); 5223 } 5224 catch (Exception e) 5225 { 5226 Message message = ERR_SCHEMA_RESTORE_ERROR_ON_ZIP_STREAM_CLOSE.get( 5227 backupID, backupFile.getPath(), stackTraceToSingleLineString(e)); 5228 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5229 message, e); 5230 } 5231 5232 5233 // At this point, we should be done with the contents of the ZIP file and 5234 // the restore should be complete. If we were generating a digest or MAC, 5235 // then make sure it checks out. 5236 if (digest != null) 5237 { 5238 byte[] calculatedHash = digest.digest(); 5239 if (Arrays.equals(calculatedHash, unsignedHash)) 5240 { 5241 Message message = NOTE_SCHEMA_RESTORE_UNSIGNED_HASH_VALID.get(); 5242 logError(message); 5243 } 5244 else 5245 { 5246 // Tell the user where the previous schema was archived. 5247 if (schemaBackupDir != null) 5248 { 5249 Message message = ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED.get( 5250 schemaBackupDir.getPath()); 5251 logError(message); 5252 } 5253 5254 Message message = 5255 ERR_SCHEMA_RESTORE_UNSIGNED_HASH_INVALID.get(backupID); 5256 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5257 message); 5258 } 5259 } 5260 5261 if (mac != null) 5262 { 5263 byte[] calculatedSignature = mac.doFinal(); 5264 if (Arrays.equals(calculatedSignature, signedHash)) 5265 { 5266 Message message = NOTE_SCHEMA_RESTORE_SIGNED_HASH_VALID.get(); 5267 logError(message); 5268 } 5269 else 5270 { 5271 // Tell the user where the previous schema was archived. 5272 if (schemaBackupDir != null) 5273 { 5274 Message message = ERR_SCHEMA_RESTORE_OLD_SCHEMA_SAVED.get( 5275 schemaBackupDir.getPath()); 5276 logError(message); 5277 } 5278 5279 Message message = ERR_SCHEMA_RESTORE_SIGNED_HASH_INVALID.get( 5280 schemaBackupDir.getPath()); 5281 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 5282 message); 5283 } 5284 } 5285 5286 5287 // If we are just verifying the archive, then we're done. 
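// (In that case the current schema directory was never moved aside, so there is nothing to clean up.)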
5288 if (verifyOnly) 5289 { 5290 Message message = 5291 NOTE_SCHEMA_RESTORE_VERIFY_SUCCESSFUL.get(backupID, backupPath); 5292 logError(message); 5293 return; 5294 } 5295 5296 5297 // If we've gotten here, then the archive was restored successfully. Get 5298 // rid of the temporary copy we made of the previous schema directory and 5299 // exit. 5300 if (schemaBackupDir != null) 5301 { 5302 recursiveDelete(schemaBackupDir); 5303 } 5304 5305 Message message = NOTE_SCHEMA_RESTORE_SUCCESSFUL.get(backupID, backupPath); 5306 logError(message); 5307 } 5308 5309 5310 5311 /** 5312 * {@inheritDoc} 5313 */ 5314 public boolean isConfigurationChangeAcceptable( 5315 SchemaBackendCfg configEntry, 5316 List<Message> unacceptableReasons) 5317 { 5318 return true; 5319 } 5320 5321 5322 5323 /** 5324 * {@inheritDoc} 5325 */ 5326 public ConfigChangeResult applyConfigurationChange( 5327 SchemaBackendCfg backendCfg) 5328 { 5329 ResultCode resultCode = ResultCode.SUCCESS; 5330 boolean adminActionRequired = false; 5331 ArrayList<Message> messages = new ArrayList<Message>(); 5332 5333 5334 // Check to see if we should apply a new set of base DNs. 5335 Set<DN> newBaseDNs; 5336 try 5337 { 5338 newBaseDNs = new HashSet<DN>(backendCfg.getSchemaEntryDN()); 5339 if (newBaseDNs.isEmpty()) 5340 { 5341 newBaseDNs.add(DN.decode(DN_DEFAULT_SCHEMA_ROOT)); 5342 } 5343 } 5344 catch (Exception e) 5345 { 5346 if (debugEnabled()) 5347 { 5348 TRACER.debugCaught(DebugLogLevel.ERROR, e); 5349 } 5350 5351 5352 messages.add(ERR_SCHEMA_CANNOT_DETERMINE_BASE_DN.get( 5353 String.valueOf(configEntryDN), 5354 getExceptionMessage(e))); 5355 resultCode = DirectoryServer.getServerErrorResultCode(); 5356 newBaseDNs = null; 5357 } 5358 5359 5360 // Check to see if we should change the behavior regarding whether to show 5361 // all schema attributes. 5362 boolean newShowAllAttributes = backendCfg.isShowAllAttributes(); 5363 5364 5365 // Check to see if there is a new set of user-defined attributes. 5366 ArrayList<Attribute> newUserAttrs = new ArrayList<Attribute>(); 5367 try 5368 { 5369 ConfigEntry configEntry = DirectoryServer.getConfigEntry(configEntryDN); 5370 for (List<Attribute> attrs : 5371 configEntry.getEntry().getUserAttributes().values()) 5372 { 5373 for (Attribute a : attrs) 5374 { 5375 if (! isSchemaConfigAttribute(a)) 5376 { 5377 newUserAttrs.add(a); 5378 } 5379 } 5380 } 5381 for (List<Attribute> attrs : 5382 configEntry.getEntry().getOperationalAttributes().values()) 5383 { 5384 for (Attribute a : attrs) 5385 { 5386 if (! isSchemaConfigAttribute(a)) 5387 { 5388 newUserAttrs.add(a); 5389 } 5390 } 5391 } 5392 } 5393 catch (ConfigException e) 5394 { 5395 if (debugEnabled()) 5396 { 5397 TRACER.debugCaught(DebugLogLevel.ERROR, e); 5398 } 5399 5400 messages.add(ERR_CONFIG_BACKEND_ERROR_INTERACTING_WITH_BACKEND_ENTRY.get( 5401 String.valueOf(configEntryDN), 5402 stackTraceToSingleLineString(e))); 5403 resultCode = DirectoryServer.getServerErrorResultCode(); 5404 } 5405 5406 5407 if (resultCode == ResultCode.SUCCESS) 5408 { 5409 // Get an array containing the new base DNs to use. 5410 DN[] dnArray = new DN[newBaseDNs.size()]; 5411 newBaseDNs.toArray(dnArray); 5412 5413 5414 // Determine the set of DNs to add and delete. When this is done, the 5415 // deleteBaseDNs will contain the set of DNs that should no longer be used 5416 // and should be deregistered from the server, and the newBaseDNs set will 5417 // just contain the set of DNs to add. 
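// Illustrative example: if the backend currently serves only cn=schema and the new configuration lists cn=schema and cn=subschema, then deleteBaseDNs remains empty and newBaseDNs is reduced to cn=subschema, which gets registered below.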
5418 HashSet<DN> deleteBaseDNs = new HashSet<DN>(baseDNs.length); 5419 for (DN baseDN : baseDNs) 5420 { 5421 if (! newBaseDNs.remove(baseDN)) 5422 { 5423 deleteBaseDNs.add(baseDN); 5424 } 5425 } 5426 5427 for (DN dn : deleteBaseDNs) 5428 { 5429 try 5430 { 5431 DirectoryServer.deregisterBaseDN(dn); 5432 messages.add(INFO_SCHEMA_DEREGISTERED_BASE_DN.get( 5433 String.valueOf(dn))); 5434 } 5435 catch (Exception e) 5436 { 5437 if (debugEnabled()) 5438 { 5439 TRACER.debugCaught(DebugLogLevel.ERROR, e); 5440 } 5441 5442 messages.add(ERR_SCHEMA_CANNOT_DEREGISTER_BASE_DN.get( 5443 String.valueOf(dn), 5444 getExceptionMessage(e))); 5445 resultCode = DirectoryServer.getServerErrorResultCode(); 5446 } 5447 } 5448 5449 baseDNs = dnArray; 5450 for (DN dn : newBaseDNs) 5451 { 5452 try 5453 { 5454 DirectoryServer.registerBaseDN(dn, this, true); 5455 messages.add(INFO_SCHEMA_REGISTERED_BASE_DN.get(String.valueOf(dn))); 5456 } 5457 catch (Exception e) 5458 { 5459 if (debugEnabled()) 5460 { 5461 TRACER.debugCaught(DebugLogLevel.ERROR, e); 5462 } 5463 5464 messages.add(ERR_SCHEMA_CANNOT_REGISTER_BASE_DN.get( 5465 String.valueOf(dn), 5466 getExceptionMessage(e))); 5467 resultCode = DirectoryServer.getServerErrorResultCode(); 5468 } 5469 } 5470 5471 5472 showAllAttributes = newShowAllAttributes; 5473 5474 5475 userDefinedAttributes = newUserAttrs; 5476 Message message = INFO_SCHEMA_USING_NEW_USER_ATTRS.get(); 5477 messages.add(message); 5478 } 5479 5480 5481 currentConfig = backendCfg; 5482 return new ConfigChangeResult(resultCode, adminActionRequired, messages); 5483 } 5484 5485 5486 5487 /** 5488 * Indicates whether to treat common schema attributes like user attributes 5489 * rather than operational attributes. 5490 * 5491 * @return {@code true} if common attributes should be treated like user 5492 * attributes, or {@code false} if not. 5493 */ 5494 boolean showAllAttributes() 5495 { 5496 return showAllAttributes; 5497 } 5498 5499 5500 5501 /** 5502 * Specifies whether to treat common schema attributes like user attributes 5503 * rather than operational attributes. 5504 * 5505 * @param showAllAttributes Specifies whether to treat common schema 5506 * attributes like user attributes rather than 5507 * operational attributes. 5508 */ 5509 void setShowAllAttributes(boolean showAllAttributes) 5510 { 5511 this.showAllAttributes = showAllAttributes; 5512 } 5513 5514 5515 5516 /** 5517 * {@inheritDoc} 5518 */ 5519 public DN getComponentEntryDN() 5520 { 5521 return configEntryDN; 5522 } 5523 5524 5525 5526 /** 5527 * {@inheritDoc} 5528 */ 5529 public String getClassName() 5530 { 5531 return CLASS_NAME; 5532 } 5533 5534 5535 5536 /** 5537 * {@inheritDoc} 5538 */ 5539 public LinkedHashMap<String,String> getAlerts() 5540 { 5541 LinkedHashMap<String,String> alerts = new LinkedHashMap<String,String>(); 5542 5543 alerts.put(ALERT_TYPE_CANNOT_COPY_SCHEMA_FILES, 5544 ALERT_DESCRIPTION_CANNOT_COPY_SCHEMA_FILES); 5545 alerts.put(ALERT_TYPE_CANNOT_WRITE_NEW_SCHEMA_FILES, 5546 ALERT_DESCRIPTION_CANNOT_WRITE_NEW_SCHEMA_FILES); 5547 5548 return alerts; 5549 } 5550 5551 5552 5553 /** 5554 * Returns an attribute that has the minimum upper bound value removed from 5555 * all of its attribute values. 5556 * 5557 * @param valueSet The original valueset containing the 5558 * attribute type definitions. 5559 * 5560 * @return Attribute that with all of the minimum upper bound values removed 5561 * from its attribute values. 
5562 */ 5563 private Attribute 5564 stripMinUpperBoundValues(LinkedHashSet<AttributeValue> valueSet) { 5565 5566 LinkedHashSet<AttributeValue> valueSetCopy = 5567 new LinkedHashSet<AttributeValue>(); 5568 for (AttributeValue v : valueSet) { 5569 // If it exists, strip the minimum upper bound value from the 5570 // attribute value. 5571 if (v.toString().indexOf('{') != -1) { 5572 // Create an attribute value from the stripped string and add it to the 5573 // valueset. 5574 String strippedStr = 5575 v.toString().replaceFirst(stripMinUpperBoundRegEx, ""); 5576 ASN1OctetString s = new ASN1OctetString(strippedStr); 5577 AttributeValue strippedVal = new AttributeValue(s, s); 5578 valueSetCopy.add(strippedVal); 5579 } else 5580 valueSetCopy.add(v); 5581 } 5582 return 5583 new Attribute(attributeTypesType, ATTR_ATTRIBUTE_TYPES, valueSetCopy); 5584 } 5585 5586 5587 5588 /** 5589 * {@inheritDoc} 5590 */ 5591 public void preloadEntryCache() throws UnsupportedOperationException { 5592 throw new UnsupportedOperationException("Operation not supported."); 5593 } 5594 } 5595